summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--deps/v8/.gitignore1
-rw-r--r--deps/v8/AUTHORS1
-rw-r--r--deps/v8/BUILD.gn191
-rw-r--r--deps/v8/ChangeLog571
-rw-r--r--deps/v8/DEPS14
-rw-r--r--deps/v8/Makefile14
-rw-r--r--deps/v8/OWNERS8
-rw-r--r--deps/v8/PRESUBMIT.py55
-rw-r--r--deps/v8/WATCHLISTS2
-rwxr-xr-xdeps/v8/build/download_gold_plugin.py51
-rw-r--r--deps/v8/build/features.gypi6
-rwxr-xr-xdeps/v8/build/get_landmines.py1
-rw-r--r--deps/v8/build/standalone.gypi242
-rw-r--r--deps/v8/build/toolchain.gypi4
-rw-r--r--deps/v8/include/v8-platform.h34
-rw-r--r--deps/v8/include/v8-util.h7
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8.h167
-rw-r--r--deps/v8/infra/project-config/README.md1
-rw-r--r--deps/v8/infra/project-config/cr-buildbucket.cfg23
-rw-r--r--deps/v8/samples/process.cc12
-rw-r--r--deps/v8/samples/shell.cc24
-rw-r--r--deps/v8/src/DEPS12
-rw-r--r--deps/v8/src/accessors.cc45
-rw-r--r--deps/v8/src/accessors.h6
-rw-r--r--deps/v8/src/allocation-tracker.cc2
-rw-r--r--deps/v8/src/allocation-tracker.h13
-rw-r--r--deps/v8/src/api-natives.h5
-rw-r--r--deps/v8/src/api.cc384
-rw-r--r--deps/v8/src/api.h82
-rw-r--r--deps/v8/src/arguments.cc4
-rw-r--r--deps/v8/src/arguments.h6
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h55
-rw-r--r--deps/v8/src/arm/assembler-arm.cc69
-rw-r--r--deps/v8/src/arm/assembler-arm.h25
-rw-r--r--deps/v8/src/arm/builtins-arm.cc437
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc344
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h2
-rw-r--r--deps/v8/src/arm/codegen-arm.cc9
-rw-r--r--deps/v8/src/arm/codegen-arm.h3
-rw-r--r--deps/v8/src/arm/constants-arm.cc2
-rw-r--r--deps/v8/src/arm/constants-arm.h6
-rw-r--r--deps/v8/src/arm/cpu-arm.cc2
-rw-r--r--deps/v8/src/arm/debug-arm.cc248
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc4
-rw-r--r--deps/v8/src/arm/disasm-arm.cc2
-rw-r--r--deps/v8/src/arm/frames-arm.cc2
-rw-r--r--deps/v8/src/arm/frames-arm.h6
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc41
-rw-r--r--deps/v8/src/arm/lithium-arm.cc52
-rw-r--r--deps/v8/src/arm/lithium-arm.h52
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.cc138
-rw-r--r--deps/v8/src/arm/lithium-gap-resolver-arm.cc2
-rw-r--r--deps/v8/src/arm/lithium-gap-resolver-arm.h2
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc47
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h78
-rw-r--r--deps/v8/src/arm/simulator-arm.cc17
-rw-r--r--deps/v8/src/arm/simulator-arm.h13
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h63
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc57
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h39
-rw-r--r--deps/v8/src/arm64/builtins-arm64.cc442
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc287
-rw-r--r--deps/v8/src/arm64/codegen-arm64.cc2
-rw-r--r--deps/v8/src/arm64/constants-arm64.h16
-rw-r--r--deps/v8/src/arm64/cpu-arm64.cc3
-rw-r--r--deps/v8/src/arm64/debug-arm64.cc305
-rw-r--r--deps/v8/src/arm64/decoder-arm64-inl.h3
-rw-r--r--deps/v8/src/arm64/decoder-arm64.cc2
-rw-r--r--deps/v8/src/arm64/decoder-arm64.h1
-rw-r--r--deps/v8/src/arm64/delayed-masm-arm64.cc2
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc5
-rw-r--r--deps/v8/src/arm64/disasm-arm64.cc21
-rw-r--r--deps/v8/src/arm64/disasm-arm64.h2
-rw-r--r--deps/v8/src/arm64/frames-arm64.cc2
-rw-r--r--deps/v8/src/arm64/frames-arm64.h6
-rw-r--r--deps/v8/src/arm64/instructions-arm64.cc2
-rw-r--r--deps/v8/src/arm64/instrument-arm64.cc6
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc40
-rw-r--r--deps/v8/src/arm64/lithium-arm64.cc54
-rw-r--r--deps/v8/src/arm64/lithium-arm64.h42
-rw-r--r--deps/v8/src/arm64/lithium-codegen-arm64.cc140
-rw-r--r--deps/v8/src/arm64/lithium-gap-resolver-arm64.cc2
-rw-r--r--deps/v8/src/arm64/lithium-gap-resolver-arm64.h2
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64-inl.h17
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc62
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h33
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc21
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h13
-rw-r--r--deps/v8/src/arm64/utils-arm64.h1
-rw-r--r--deps/v8/src/array-iterator.js12
-rw-r--r--deps/v8/src/array.js107
-rw-r--r--deps/v8/src/arraybuffer.js3
-rw-r--r--deps/v8/src/assembler.cc413
-rw-r--r--deps/v8/src/assembler.h150
-rw-r--r--deps/v8/src/assert-scope.cc2
-rw-r--r--deps/v8/src/ast-literal-reindexer.cc4
-rw-r--r--deps/v8/src/ast-literal-reindexer.h2
-rw-r--r--deps/v8/src/ast-numbering.cc53
-rw-r--r--deps/v8/src/ast-numbering.h10
-rw-r--r--deps/v8/src/ast-value-factory.h2
-rw-r--r--deps/v8/src/ast.cc29
-rw-r--r--deps/v8/src/ast.h67
-rw-r--r--deps/v8/src/background-parsing-task.cc10
-rw-r--r--deps/v8/src/background-parsing-task.h12
-rw-r--r--deps/v8/src/bailout-reason.h39
-rw-r--r--deps/v8/src/base/build_config.h5
-rw-r--r--deps/v8/src/base/platform/time.cc28
-rw-r--r--deps/v8/src/base/smart-pointers.h (renamed from deps/v8/src/smart-pointers.h)60
-rw-r--r--deps/v8/src/basic-block-profiler.h3
-rw-r--r--deps/v8/src/bignum-dtoa.cc7
-rw-r--r--deps/v8/src/bignum-dtoa.h2
-rw-r--r--deps/v8/src/bignum.cc2
-rw-r--r--deps/v8/src/bignum.h2
-rw-r--r--deps/v8/src/bootstrapper.cc740
-rw-r--r--deps/v8/src/bootstrapper.h20
-rw-r--r--deps/v8/src/builtins.cc137
-rw-r--r--deps/v8/src/builtins.h33
-rw-r--r--deps/v8/src/cached-powers.cc3
-rw-r--r--deps/v8/src/cancelable-task.cc28
-rw-r--r--deps/v8/src/cancelable-task.h74
-rw-r--r--deps/v8/src/char-predicates.h1
-rw-r--r--deps/v8/src/checks.cc9
-rw-r--r--deps/v8/src/code-factory.cc33
-rw-r--r--deps/v8/src/code-factory.h10
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc244
-rw-r--r--deps/v8/src/code-stubs.cc89
-rw-r--r--deps/v8/src/code-stubs.h181
-rw-r--r--deps/v8/src/code-stubs.js69
-rw-r--r--deps/v8/src/codegen.cc26
-rw-r--r--deps/v8/src/codegen.h2
-rw-r--r--deps/v8/src/collection.js40
-rw-r--r--deps/v8/src/compilation-dependencies.cc4
-rw-r--r--deps/v8/src/compilation-dependencies.h3
-rw-r--r--deps/v8/src/compiler.cc314
-rw-r--r--deps/v8/src/compiler.h63
-rw-r--r--deps/v8/src/compiler/OWNERS5
-rw-r--r--deps/v8/src/compiler/access-builder.cc200
-rw-r--r--deps/v8/src/compiler/access-builder.h3
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc136
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc18
-rw-r--r--deps/v8/src/compiler/arm/linkage-arm.cc74
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc110
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc8
-rw-r--r--deps/v8/src/compiler/arm64/linkage-arm64.cc76
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.cc257
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.h21
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.cc2
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.h1
-rw-r--r--deps/v8/src/compiler/c-linkage.cc228
-rw-r--r--deps/v8/src/compiler/coalesced-live-ranges.cc191
-rw-r--r--deps/v8/src/compiler/coalesced-live-ranges.h151
-rw-r--r--deps/v8/src/compiler/code-generator.cc74
-rw-r--r--deps/v8/src/compiler/common-node-cache.cc2
-rw-r--r--deps/v8/src/compiler/common-operator.cc6
-rw-r--r--deps/v8/src/compiler/common-operator.h3
-rw-r--r--deps/v8/src/compiler/frame-states.cc1
-rw-r--r--deps/v8/src/compiler/frame-states.h23
-rw-r--r--deps/v8/src/compiler/frame.cc24
-rw-r--r--deps/v8/src/compiler/frame.h141
-rw-r--r--deps/v8/src/compiler/graph-builder.h82
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc4
-rw-r--r--deps/v8/src/compiler/greedy-allocator.cc47
-rw-r--r--deps/v8/src/compiler/greedy-allocator.h30
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc107
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc31
-rw-r--r--deps/v8/src/compiler/ia32/linkage-ia32.cc65
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h21
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc54
-rw-r--r--deps/v8/src/compiler/instruction.cc9
-rw-r--r--deps/v8/src/compiler/instruction.h4
-rw-r--r--deps/v8/src/compiler/interpreter-assembler.cc265
-rw-r--r--deps/v8/src/compiler/interpreter-assembler.h118
-rw-r--r--deps/v8/src/compiler/js-context-relaxation.cc67
-rw-r--r--deps/v8/src/compiler/js-context-relaxation.h32
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc62
-rw-r--r--deps/v8/src/compiler/js-context-specialization.h4
-rw-r--r--deps/v8/src/compiler/js-frame-specialization.h4
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc100
-rw-r--r--deps/v8/src/compiler/js-inlining.cc13
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc10
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h1
-rw-r--r--deps/v8/src/compiler/js-operator.cc106
-rw-r--r--deps/v8/src/compiler/js-operator.h99
-rw-r--r--deps/v8/src/compiler/js-type-feedback-lowering.cc118
-rw-r--r--deps/v8/src/compiler/js-type-feedback-lowering.h66
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc43
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h1
-rw-r--r--deps/v8/src/compiler/linkage-impl.h303
-rw-r--r--deps/v8/src/compiler/linkage.cc360
-rw-r--r--deps/v8/src/compiler/linkage.h119
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc35
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.h1
-rw-r--r--deps/v8/src/compiler/machine-type.h5
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc142
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc24
-rw-r--r--deps/v8/src/compiler/mips/linkage-mips.cc73
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc151
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc20
-rw-r--r--deps/v8/src/compiler/mips64/linkage-mips64.cc73
-rw-r--r--deps/v8/src/compiler/move-optimizer.cc11
-rw-r--r--deps/v8/src/compiler/node.h6
-rw-r--r--deps/v8/src/compiler/osr.cc2
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.cc2
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.h2
-rw-r--r--deps/v8/src/compiler/pipeline.cc87
-rw-r--r--deps/v8/src/compiler/pipeline.h5
-rw-r--r--deps/v8/src/compiler/ppc/OWNERS1
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc129
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc28
-rw-r--r--deps/v8/src/compiler/ppc/linkage-ppc.cc72
-rw-r--r--deps/v8/src/compiler/preprocess-live-ranges.cc169
-rw-r--r--deps/v8/src/compiler/preprocess-live-ranges.h35
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc71
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h144
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.cc8
-rw-r--r--deps/v8/src/compiler/register-allocator.cc203
-rw-r--r--deps/v8/src/compiler/register-allocator.h46
-rw-r--r--deps/v8/src/compiler/register-configuration.cc6
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc27
-rw-r--r--deps/v8/src/compiler/simplified-operator.h1
-rw-r--r--deps/v8/src/compiler/source-position.cc1
-rw-r--r--deps/v8/src/compiler/typer.cc13
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc177
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc31
-rw-r--r--deps/v8/src/compiler/x64/linkage-x64.cc93
-rw-r--r--deps/v8/src/compiler/x87/code-generator-x87.cc129
-rw-r--r--deps/v8/src/compiler/x87/instruction-selector-x87.cc27
-rw-r--r--deps/v8/src/compiler/x87/linkage-x87.cc65
-rw-r--r--deps/v8/src/context-measure.cc76
-rw-r--r--deps/v8/src/context-measure.h47
-rw-r--r--deps/v8/src/contexts.cc14
-rw-r--r--deps/v8/src/contexts.h191
-rw-r--r--deps/v8/src/conversions-inl.h68
-rw-r--r--deps/v8/src/conversions.h74
-rw-r--r--deps/v8/src/counters.h1
-rw-r--r--deps/v8/src/cpu-profiler.cc3
-rw-r--r--deps/v8/src/cpu-profiler.h4
-rw-r--r--deps/v8/src/d8-debug.cc132
-rw-r--r--deps/v8/src/d8-debug.h20
-rw-r--r--deps/v8/src/d8-posix.cc209
-rw-r--r--deps/v8/src/d8-readline.cc152
-rw-r--r--deps/v8/src/d8-windows.cc3
-rw-r--r--deps/v8/src/d8.cc985
-rw-r--r--deps/v8/src/d8.gyp14
-rw-r--r--deps/v8/src/d8.h102
-rw-r--r--deps/v8/src/d8.js1959
-rw-r--r--deps/v8/src/date.js120
-rw-r--r--deps/v8/src/dateparser-inl.h25
-rw-r--r--deps/v8/src/dateparser.cc12
-rw-r--r--deps/v8/src/dateparser.h7
-rw-r--r--deps/v8/src/debug/OWNERS7
-rw-r--r--deps/v8/src/debug/arm/OWNERS1
-rw-r--r--deps/v8/src/debug/arm/debug-arm.cc159
-rw-r--r--deps/v8/src/debug/arm64/OWNERS1
-rw-r--r--deps/v8/src/debug/arm64/debug-arm64.cc166
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc314
-rw-r--r--deps/v8/src/debug/debug-evaluate.h100
-rw-r--r--deps/v8/src/debug/debug-frames.cc219
-rw-r--r--deps/v8/src/debug/debug-frames.h81
-rw-r--r--deps/v8/src/debug/debug-scopes.cc769
-rw-r--r--deps/v8/src/debug/debug-scopes.h126
-rw-r--r--deps/v8/src/debug/debug.cc (renamed from deps/v8/src/debug.cc)1377
-rw-r--r--deps/v8/src/debug/debug.h (renamed from deps/v8/src/debug.h)140
-rw-r--r--deps/v8/src/debug/debug.js (renamed from deps/v8/src/debug-debugger.js)232
-rw-r--r--deps/v8/src/debug/ia32/debug-ia32.cc145
-rw-r--r--deps/v8/src/debug/liveedit.cc (renamed from deps/v8/src/liveedit.cc)76
-rw-r--r--deps/v8/src/debug/liveedit.h (renamed from deps/v8/src/liveedit.h)14
-rw-r--r--deps/v8/src/debug/liveedit.js (renamed from deps/v8/src/liveedit-debugger.js)102
-rw-r--r--deps/v8/src/debug/mips/OWNERS5
-rw-r--r--deps/v8/src/debug/mips/debug-mips.cc148
-rw-r--r--deps/v8/src/debug/mips64/OWNERS5
-rw-r--r--deps/v8/src/debug/mips64/debug-mips64.cc150
-rw-r--r--deps/v8/src/debug/mirrors.js (renamed from deps/v8/src/mirror-debugger.js)355
-rw-r--r--deps/v8/src/debug/ppc/OWNERS5
-rw-r--r--deps/v8/src/debug/ppc/debug-ppc.cc157
-rw-r--r--deps/v8/src/debug/x64/debug-x64.cc146
-rw-r--r--deps/v8/src/debug/x87/OWNERS1
-rw-r--r--deps/v8/src/debug/x87/debug-x87.cc145
-rw-r--r--deps/v8/src/deoptimizer.cc129
-rw-r--r--deps/v8/src/deoptimizer.h9
-rw-r--r--deps/v8/src/disasm.h2
-rw-r--r--deps/v8/src/disassembler.cc21
-rw-r--r--deps/v8/src/diy-fp.cc5
-rw-r--r--deps/v8/src/diy-fp.h4
-rw-r--r--deps/v8/src/effects.h2
-rw-r--r--deps/v8/src/elements-kind.cc47
-rw-r--r--deps/v8/src/elements-kind.h40
-rw-r--r--deps/v8/src/elements.cc297
-rw-r--r--deps/v8/src/elements.h28
-rw-r--r--deps/v8/src/execution.cc88
-rw-r--r--deps/v8/src/execution.h11
-rw-r--r--deps/v8/src/expression-classifier.h17
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.cc4
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.h2
-rw-r--r--deps/v8/src/extensions/free-buffer-extension.h2
-rw-r--r--deps/v8/src/extensions/gc-extension.h3
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc2
-rw-r--r--deps/v8/src/extensions/statistics-extension.h2
-rw-r--r--deps/v8/src/extensions/trigger-failure-extension.h2
-rw-r--r--deps/v8/src/factory.cc237
-rw-r--r--deps/v8/src/factory.h80
-rw-r--r--deps/v8/src/field-index-inl.h6
-rw-r--r--deps/v8/src/flag-definitions.h74
-rw-r--r--deps/v8/src/frames-inl.h6
-rw-r--r--deps/v8/src/frames.cc32
-rw-r--r--deps/v8/src/frames.h17
-rw-r--r--deps/v8/src/full-codegen/OWNERS8
-rw-r--r--deps/v8/src/full-codegen/arm/full-codegen-arm.cc (renamed from deps/v8/src/arm/full-codegen-arm.cc)501
-rw-r--r--deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc (renamed from deps/v8/src/arm64/full-codegen-arm64.cc)553
-rw-r--r--deps/v8/src/full-codegen/full-codegen.cc (renamed from deps/v8/src/full-codegen.cc)171
-rw-r--r--deps/v8/src/full-codegen/full-codegen.h (renamed from deps/v8/src/full-codegen.h)45
-rw-r--r--deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc (renamed from deps/v8/src/ia32/full-codegen-ia32.cc)494
-rw-r--r--deps/v8/src/full-codegen/mips/OWNERS5
-rw-r--r--deps/v8/src/full-codegen/mips/full-codegen-mips.cc (renamed from deps/v8/src/mips/full-codegen-mips.cc)520
-rw-r--r--deps/v8/src/full-codegen/mips64/OWNERS5
-rw-r--r--deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc (renamed from deps/v8/src/mips64/full-codegen-mips64.cc)522
-rw-r--r--deps/v8/src/full-codegen/ppc/OWNERS5
-rw-r--r--deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc (renamed from deps/v8/src/ppc/full-codegen-ppc.cc)496
-rw-r--r--deps/v8/src/full-codegen/x64/full-codegen-x64.cc (renamed from deps/v8/src/x64/full-codegen-x64.cc)552
-rw-r--r--deps/v8/src/full-codegen/x87/OWNERS1
-rw-r--r--deps/v8/src/full-codegen/x87/full-codegen-x87.cc (renamed from deps/v8/src/x87/full-codegen-x87.cc)493
-rw-r--r--deps/v8/src/func-name-inferrer.cc3
-rw-r--r--deps/v8/src/futex-emulation.cc231
-rw-r--r--deps/v8/src/futex-emulation.h124
-rw-r--r--deps/v8/src/gdb-jit.cc62
-rw-r--r--deps/v8/src/gdb-jit.h2
-rw-r--r--deps/v8/src/global-handles.cc71
-rw-r--r--deps/v8/src/global-handles.h11
-rw-r--r--deps/v8/src/globals.h35
-rw-r--r--deps/v8/src/handles-inl.h76
-rw-r--r--deps/v8/src/handles.cc30
-rw-r--r--deps/v8/src/handles.h305
-rw-r--r--deps/v8/src/harmony-array-includes.js67
-rw-r--r--deps/v8/src/harmony-array.js23
-rw-r--r--deps/v8/src/harmony-atomics.js99
-rw-r--r--deps/v8/src/harmony-object-observe.js14
-rw-r--r--deps/v8/src/harmony-object.js5
-rw-r--r--deps/v8/src/harmony-regexp.js10
-rw-r--r--deps/v8/src/harmony-simd.js682
-rw-r--r--deps/v8/src/harmony-typedarray.js8
-rw-r--r--deps/v8/src/heap-profiler.cc3
-rw-r--r--deps/v8/src/heap-profiler.h15
-rw-r--r--deps/v8/src/heap-snapshot-generator.cc35
-rw-r--r--deps/v8/src/heap-snapshot-generator.h1
-rw-r--r--deps/v8/src/heap/OWNERS5
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.cc15
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.h4
-rw-r--r--deps/v8/src/heap/gc-tracer.cc113
-rw-r--r--deps/v8/src/heap/gc-tracer.h17
-rw-r--r--deps/v8/src/heap/heap-inl.h103
-rw-r--r--deps/v8/src/heap/heap.cc874
-rw-r--r--deps/v8/src/heap/heap.h440
-rw-r--r--deps/v8/src/heap/identity-map.cc4
-rw-r--r--deps/v8/src/heap/identity-map.h2
-rw-r--r--deps/v8/src/heap/incremental-marking-inl.h6
-rw-r--r--deps/v8/src/heap/incremental-marking.cc75
-rw-r--r--deps/v8/src/heap/incremental-marking.h7
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h134
-rw-r--r--deps/v8/src/heap/mark-compact.cc499
-rw-r--r--deps/v8/src/heap/mark-compact.h208
-rw-r--r--deps/v8/src/heap/memory-reducer.cc29
-rw-r--r--deps/v8/src/heap/memory-reducer.h31
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h131
-rw-r--r--deps/v8/src/heap/objects-visiting.cc155
-rw-r--r--deps/v8/src/heap/objects-visiting.h37
-rw-r--r--deps/v8/src/heap/spaces-inl.h149
-rw-r--r--deps/v8/src/heap/spaces.cc138
-rw-r--r--deps/v8/src/heap/spaces.h196
-rw-r--r--deps/v8/src/heap/store-buffer-inl.h1
-rw-r--r--deps/v8/src/heap/store-buffer.cc39
-rw-r--r--deps/v8/src/hydrogen-instructions.cc70
-rw-r--r--deps/v8/src/hydrogen-instructions.h210
-rw-r--r--deps/v8/src/hydrogen-uint32-analysis.cc12
-rw-r--r--deps/v8/src/hydrogen.cc617
-rw-r--r--deps/v8/src/hydrogen.h82
-rw-r--r--deps/v8/src/i18n.cc22
-rw-r--r--deps/v8/src/i18n.h5
-rw-r--r--deps/v8/src/i18n.js90
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h74
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc26
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h26
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc428
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc335
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc2
-rw-r--r--deps/v8/src/ia32/cpu-ia32.cc2
-rw-r--r--deps/v8/src/ia32/debug-ia32.cc283
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc5
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc13
-rw-r--r--deps/v8/src/ia32/frames-ia32.cc2
-rw-r--r--deps/v8/src/ia32/frames-ia32.h6
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc53
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.cc136
-rw-r--r--deps/v8/src/ia32/lithium-gap-resolver-ia32.cc2
-rw-r--r--deps/v8/src/ia32/lithium-gap-resolver-ia32.h2
-rw-r--r--deps/v8/src/ia32/lithium-ia32.cc55
-rw-r--r--deps/v8/src/ia32/lithium-ia32.h60
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc36
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h26
-rw-r--r--deps/v8/src/ic/OWNERS6
-rw-r--r--deps/v8/src/ic/access-compiler.cc2
-rw-r--r--deps/v8/src/ic/arm/access-compiler-arm.cc4
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc39
-rw-r--r--deps/v8/src/ic/arm/ic-arm.cc78
-rw-r--r--deps/v8/src/ic/arm/ic-compiler-arm.cc4
-rw-r--r--deps/v8/src/ic/arm/stub-cache-arm.cc21
-rw-r--r--deps/v8/src/ic/arm64/access-compiler-arm64.cc4
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc39
-rw-r--r--deps/v8/src/ic/arm64/ic-arm64.cc77
-rw-r--r--deps/v8/src/ic/arm64/ic-compiler-arm64.cc4
-rw-r--r--deps/v8/src/ic/arm64/stub-cache-arm64.cc21
-rw-r--r--deps/v8/src/ic/call-optimization.cc2
-rw-r--r--deps/v8/src/ic/handler-compiler.cc40
-rw-r--r--deps/v8/src/ic/handler-compiler.h5
-rw-r--r--deps/v8/src/ic/ia32/access-compiler-ia32.cc4
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc40
-rw-r--r--deps/v8/src/ic/ia32/ic-compiler-ia32.cc4
-rw-r--r--deps/v8/src/ic/ia32/ic-ia32.cc81
-rw-r--r--deps/v8/src/ic/ia32/stub-cache-ia32.cc23
-rw-r--r--deps/v8/src/ic/ic-compiler.cc6
-rw-r--r--deps/v8/src/ic/ic-inl.h74
-rw-r--r--deps/v8/src/ic/ic-state.cc3
-rw-r--r--deps/v8/src/ic/ic-state.h18
-rw-r--r--deps/v8/src/ic/ic.cc212
-rw-r--r--deps/v8/src/ic/ic.h110
-rw-r--r--deps/v8/src/ic/mips/access-compiler-mips.cc4
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc39
-rw-r--r--deps/v8/src/ic/mips/ic-compiler-mips.cc4
-rw-r--r--deps/v8/src/ic/mips/ic-mips.cc79
-rw-r--r--deps/v8/src/ic/mips/stub-cache-mips.cc21
-rw-r--r--deps/v8/src/ic/mips64/access-compiler-mips64.cc4
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc39
-rw-r--r--deps/v8/src/ic/mips64/ic-compiler-mips64.cc4
-rw-r--r--deps/v8/src/ic/mips64/ic-mips64.cc72
-rw-r--r--deps/v8/src/ic/mips64/stub-cache-mips64.cc21
-rw-r--r--deps/v8/src/ic/ppc/OWNERS1
-rw-r--r--deps/v8/src/ic/ppc/access-compiler-ppc.cc4
-rw-r--r--deps/v8/src/ic/ppc/handler-compiler-ppc.cc39
-rw-r--r--deps/v8/src/ic/ppc/ic-compiler-ppc.cc4
-rw-r--r--deps/v8/src/ic/ppc/ic-ppc.cc69
-rw-r--r--deps/v8/src/ic/ppc/stub-cache-ppc.cc21
-rw-r--r--deps/v8/src/ic/stub-cache.cc3
-rw-r--r--deps/v8/src/ic/stub-cache.h6
-rw-r--r--deps/v8/src/ic/x64/access-compiler-x64.cc4
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc37
-rw-r--r--deps/v8/src/ic/x64/ic-compiler-x64.cc4
-rw-r--r--deps/v8/src/ic/x64/ic-x64.cc64
-rw-r--r--deps/v8/src/ic/x64/stub-cache-x64.cc20
-rw-r--r--deps/v8/src/ic/x87/access-compiler-x87.cc4
-rw-r--r--deps/v8/src/ic/x87/handler-compiler-x87.cc40
-rw-r--r--deps/v8/src/ic/x87/ic-compiler-x87.cc4
-rw-r--r--deps/v8/src/ic/x87/ic-x87.cc81
-rw-r--r--deps/v8/src/ic/x87/stub-cache-x87.cc23
-rw-r--r--deps/v8/src/interface-descriptors.cc77
-rw-r--r--deps/v8/src/interface-descriptors.h87
-rw-r--r--deps/v8/src/interpreter/DEPS4
-rw-r--r--deps/v8/src/interpreter/OWNERS1
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc222
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h122
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc370
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h44
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc158
-rw-r--r--deps/v8/src/interpreter/bytecodes.h119
-rw-r--r--deps/v8/src/interpreter/interpreter.cc231
-rw-r--r--deps/v8/src/interpreter/interpreter.h60
-rw-r--r--deps/v8/src/isolate.cc131
-rw-r--r--deps/v8/src/isolate.h59
-rw-r--r--deps/v8/src/json-parser.h9
-rw-r--r--deps/v8/src/json-stringifier.h12
-rw-r--r--deps/v8/src/json.js23
-rw-r--r--deps/v8/src/layout-descriptor-inl.h13
-rw-r--r--deps/v8/src/layout-descriptor.cc4
-rw-r--r--deps/v8/src/layout-descriptor.h4
-rw-r--r--deps/v8/src/libplatform/default-platform.cc9
-rw-r--r--deps/v8/src/libplatform/default-platform.h3
-rw-r--r--deps/v8/src/list-inl.h15
-rw-r--r--deps/v8/src/list.h12
-rw-r--r--deps/v8/src/lithium-allocator.cc2
-rw-r--r--deps/v8/src/lithium-allocator.h2
-rw-r--r--deps/v8/src/lithium-codegen.h2
-rw-r--r--deps/v8/src/log-utils.cc7
-rw-r--r--deps/v8/src/log-utils.h2
-rw-r--r--deps/v8/src/log.cc36
-rw-r--r--deps/v8/src/lookup.cc137
-rw-r--r--deps/v8/src/lookup.h12
-rw-r--r--deps/v8/src/macros.py14
-rw-r--r--deps/v8/src/math.js17
-rw-r--r--deps/v8/src/messages.cc143
-rw-r--r--deps/v8/src/messages.h88
-rw-r--r--deps/v8/src/messages.js190
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h60
-rw-r--r--deps/v8/src/mips/assembler-mips.cc21
-rw-r--r--deps/v8/src/mips/assembler-mips.h22
-rw-r--r--deps/v8/src/mips/builtins-mips.cc453
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc334
-rw-r--r--deps/v8/src/mips/code-stubs-mips.h8
-rw-r--r--deps/v8/src/mips/codegen-mips.cc9
-rw-r--r--deps/v8/src/mips/codegen-mips.h3
-rw-r--r--deps/v8/src/mips/constants-mips.cc2
-rw-r--r--deps/v8/src/mips/cpu-mips.cc2
-rw-r--r--deps/v8/src/mips/debug-mips.cc252
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc5
-rw-r--r--deps/v8/src/mips/disasm-mips.cc19
-rw-r--r--deps/v8/src/mips/frames-mips.cc2
-rw-r--r--deps/v8/src/mips/frames-mips.h6
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc41
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.cc147
-rw-r--r--deps/v8/src/mips/lithium-gap-resolver-mips.cc2
-rw-r--r--deps/v8/src/mips/lithium-gap-resolver-mips.h2
-rw-r--r--deps/v8/src/mips/lithium-mips.cc52
-rw-r--r--deps/v8/src/mips/lithium-mips.h52
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc58
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h47
-rw-r--r--deps/v8/src/mips/simulator-mips.cc17
-rw-r--r--deps/v8/src/mips/simulator-mips.h14
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h60
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc130
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h27
-rw-r--r--deps/v8/src/mips64/builtins-mips64.cc455
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc381
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.h8
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc9
-rw-r--r--deps/v8/src/mips64/codegen-mips64.h3
-rw-r--r--deps/v8/src/mips64/constants-mips64.cc2
-rw-r--r--deps/v8/src/mips64/constants-mips64.h6
-rw-r--r--deps/v8/src/mips64/cpu-mips64.cc2
-rw-r--r--deps/v8/src/mips64/debug-mips64.cc256
-rw-r--r--deps/v8/src/mips64/deoptimizer-mips64.cc4
-rw-r--r--deps/v8/src/mips64/disasm-mips64.cc18
-rw-r--r--deps/v8/src/mips64/frames-mips64.cc3
-rw-r--r--deps/v8/src/mips64/frames-mips64.h6
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc41
-rw-r--r--deps/v8/src/mips64/lithium-codegen-mips64.cc150
-rw-r--r--deps/v8/src/mips64/lithium-gap-resolver-mips64.cc2
-rw-r--r--deps/v8/src/mips64/lithium-gap-resolver-mips64.h2
-rw-r--r--deps/v8/src/mips64/lithium-mips64.cc52
-rw-r--r--deps/v8/src/mips64/lithium-mips64.h52
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc70
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h47
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc34
-rw-r--r--deps/v8/src/mips64/simulator-mips64.h14
-rw-r--r--deps/v8/src/modules.cc2
-rw-r--r--deps/v8/src/object-observe.js47
-rw-r--r--deps/v8/src/objects-debug.cc127
-rw-r--r--deps/v8/src/objects-inl.h1492
-rw-r--r--deps/v8/src/objects-printer.cc169
-rw-r--r--deps/v8/src/objects.cc1541
-rw-r--r--deps/v8/src/objects.h1472
-rw-r--r--deps/v8/src/optimizing-compile-dispatcher.cc2
-rw-r--r--deps/v8/src/parser.cc519
-rw-r--r--deps/v8/src/parser.h131
-rw-r--r--deps/v8/src/pattern-rewriter.cc4
-rw-r--r--deps/v8/src/pending-compilation-error-handler.cc6
-rw-r--r--deps/v8/src/ppc/OWNERS1
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h80
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc138
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h98
-rw-r--r--deps/v8/src/ppc/builtins-ppc.cc447
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc410
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.h6
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc4
-rw-r--r--deps/v8/src/ppc/codegen-ppc.h3
-rw-r--r--deps/v8/src/ppc/constants-ppc.cc2
-rw-r--r--deps/v8/src/ppc/constants-ppc.h6
-rw-r--r--deps/v8/src/ppc/cpu-ppc.cc1
-rw-r--r--deps/v8/src/ppc/debug-ppc.cc261
-rw-r--r--deps/v8/src/ppc/deoptimizer-ppc.cc4
-rw-r--r--deps/v8/src/ppc/disasm-ppc.cc118
-rw-r--r--deps/v8/src/ppc/frames-ppc.cc2
-rw-r--r--deps/v8/src/ppc/frames-ppc.h43
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc41
-rw-r--r--deps/v8/src/ppc/lithium-codegen-ppc.cc135
-rw-r--r--deps/v8/src/ppc/lithium-gap-resolver-ppc.cc2
-rw-r--r--deps/v8/src/ppc/lithium-gap-resolver-ppc.h2
-rw-r--r--deps/v8/src/ppc/lithium-ppc.cc52
-rw-r--r--deps/v8/src/ppc/lithium-ppc.h50
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc135
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h49
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc86
-rw-r--r--deps/v8/src/ppc/simulator-ppc.h17
-rw-r--r--deps/v8/src/preparser.cc51
-rw-r--r--deps/v8/src/preparser.h428
-rw-r--r--deps/v8/src/prettyprinter.cc31
-rw-r--r--deps/v8/src/profile-generator.cc6
-rw-r--r--deps/v8/src/profile-generator.h1
-rw-r--r--deps/v8/src/prologue.js133
-rw-r--r--deps/v8/src/promise.js25
-rw-r--r--deps/v8/src/property-details.h4
-rw-r--r--deps/v8/src/property.h1
-rw-r--r--deps/v8/src/proxy.js17
-rw-r--r--deps/v8/src/regexp.js4
-rw-r--r--deps/v8/src/regexp/OWNERS6
-rw-r--r--deps/v8/src/regexp/arm/OWNERS1
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc (renamed from deps/v8/src/arm/regexp-macro-assembler-arm.cc)10
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h (renamed from deps/v8/src/arm/regexp-macro-assembler-arm.h)8
-rw-r--r--deps/v8/src/regexp/arm64/OWNERS1
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc (renamed from deps/v8/src/arm64/regexp-macro-assembler-arm64.cc)10
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h (renamed from deps/v8/src/arm64/regexp-macro-assembler-arm64.h)11
-rw-r--r--deps/v8/src/regexp/bytecodes-irregexp.h (renamed from deps/v8/src/bytecodes-irregexp.h)6
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc (renamed from deps/v8/src/ia32/regexp-macro-assembler-ia32.cc)10
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h (renamed from deps/v8/src/ia32/regexp-macro-assembler-ia32.h)8
-rw-r--r--deps/v8/src/regexp/interpreter-irregexp.cc (renamed from deps/v8/src/interpreter-irregexp.cc)10
-rw-r--r--deps/v8/src/regexp/interpreter-irregexp.h (renamed from deps/v8/src/interpreter-irregexp.h)8
-rw-r--r--deps/v8/src/regexp/jsregexp-inl.h (renamed from deps/v8/src/jsregexp-inl.h)8
-rw-r--r--deps/v8/src/regexp/jsregexp.cc (renamed from deps/v8/src/jsregexp.cc)35
-rw-r--r--deps/v8/src/regexp/jsregexp.h (renamed from deps/v8/src/jsregexp.h)6
-rw-r--r--deps/v8/src/regexp/mips/OWNERS5
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc (renamed from deps/v8/src/mips/regexp-macro-assembler-mips.cc)10
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h (renamed from deps/v8/src/mips/regexp-macro-assembler-mips.h)10
-rw-r--r--deps/v8/src/regexp/mips64/OWNERS5
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc (renamed from deps/v8/src/mips64/regexp-macro-assembler-mips64.cc)10
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h (renamed from deps/v8/src/mips64/regexp-macro-assembler-mips64.h)10
-rw-r--r--deps/v8/src/regexp/ppc/OWNERS5
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc (renamed from deps/v8/src/ppc/regexp-macro-assembler-ppc.cc)17
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h (renamed from deps/v8/src/ppc/regexp-macro-assembler-ppc.h)9
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h (renamed from deps/v8/src/regexp-macro-assembler-irregexp-inl.h)13
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc (renamed from deps/v8/src/regexp-macro-assembler-irregexp.cc)9
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp.h (renamed from deps/v8/src/regexp-macro-assembler-irregexp.h)9
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-tracer.cc (renamed from deps/v8/src/regexp-macro-assembler-tracer.cc)4
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-tracer.h (renamed from deps/v8/src/regexp-macro-assembler-tracer.h)8
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc (renamed from deps/v8/src/regexp-macro-assembler.cc)5
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h (renamed from deps/v8/src/regexp-macro-assembler.h)6
-rw-r--r--deps/v8/src/regexp/regexp-stack.cc (renamed from deps/v8/src/regexp-stack.cc)5
-rw-r--r--deps/v8/src/regexp/regexp-stack.h (renamed from deps/v8/src/regexp-stack.h)10
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc (renamed from deps/v8/src/x64/regexp-macro-assembler-x64.cc)9
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h (renamed from deps/v8/src/x64/regexp-macro-assembler-x64.h)9
-rw-r--r--deps/v8/src/regexp/x87/OWNERS1
-rw-r--r--deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc (renamed from deps/v8/src/x87/regexp-macro-assembler-x87.cc)9
-rw-r--r--deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h (renamed from deps/v8/src/x87/regexp-macro-assembler-x87.h)8
-rw-r--r--deps/v8/src/rewriter.cc5
-rw-r--r--deps/v8/src/runtime-profiler.cc14
-rw-r--r--deps/v8/src/runtime.js394
-rw-r--r--deps/v8/src/runtime/runtime-array.cc87
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc136
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc217
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc8
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc15
-rw-r--r--deps/v8/src/runtime/runtime-date.cc5
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc1722
-rw-r--r--deps/v8/src/runtime/runtime-forin.cc5
-rw-r--r--deps/v8/src/runtime/runtime-function.cc35
-rw-r--r--deps/v8/src/runtime/runtime-futex.cc93
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc5
-rw-r--r--deps/v8/src/runtime/runtime-i18n.cc5
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc170
-rw-r--r--deps/v8/src/runtime/runtime-json.cc5
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc3
-rw-r--r--deps/v8/src/runtime/runtime-liveedit.cc79
-rw-r--r--deps/v8/src/runtime/runtime-maths.cc12
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc29
-rw-r--r--deps/v8/src/runtime/runtime-object.cc416
-rw-r--r--deps/v8/src/runtime/runtime-observe.cc5
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc5
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc19
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc102
-rw-r--r--deps/v8/src/runtime/runtime-simd.cc821
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc32
-rw-r--r--deps/v8/src/runtime/runtime-symbol.cc12
-rw-r--r--deps/v8/src/runtime/runtime-test.cc24
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc43
-rw-r--r--deps/v8/src/runtime/runtime-uri.cc5
-rw-r--r--deps/v8/src/runtime/runtime.cc6
-rw-r--r--deps/v8/src/runtime/runtime.h289
-rw-r--r--deps/v8/src/safepoint-table.cc3
-rw-r--r--deps/v8/src/sampler.h1
-rw-r--r--deps/v8/src/scanner.cc145
-rw-r--r--deps/v8/src/scanner.h16
-rw-r--r--deps/v8/src/scopeinfo.cc50
-rw-r--r--deps/v8/src/scopes.cc155
-rw-r--r--deps/v8/src/scopes.h63
-rw-r--r--deps/v8/src/snapshot/OWNERS3
-rw-r--r--deps/v8/src/snapshot/natives-common.cc56
-rw-r--r--deps/v8/src/snapshot/natives-external.cc21
-rw-r--r--deps/v8/src/snapshot/natives.h12
-rw-r--r--deps/v8/src/snapshot/serialize.cc175
-rw-r--r--deps/v8/src/snapshot/serialize.h35
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc4
-rw-r--r--deps/v8/src/string-builder.cc2
-rw-r--r--deps/v8/src/string-builder.h7
-rw-r--r--deps/v8/src/string-iterator.js2
-rw-r--r--deps/v8/src/string-search.cc20
-rw-r--r--deps/v8/src/string-stream.cc4
-rw-r--r--deps/v8/src/string-stream.h8
-rw-r--r--deps/v8/src/string.js60
-rw-r--r--deps/v8/src/strings-storage.cc2
-rw-r--r--deps/v8/src/strtod.cc5
-rw-r--r--deps/v8/src/strtod.h2
-rw-r--r--deps/v8/src/symbol.js7
-rw-r--r--deps/v8/src/third_party/fdlibm/fdlibm.cc9
-rw-r--r--deps/v8/src/third_party/vtune/v8vtune.gyp4
-rw-r--r--deps/v8/src/third_party/vtune/vtune-jit.cc5
-rw-r--r--deps/v8/src/transitions.cc11
-rw-r--r--deps/v8/src/type-feedback-vector.cc28
-rw-r--r--deps/v8/src/type-feedback-vector.h49
-rw-r--r--deps/v8/src/type-info.cc40
-rw-r--r--deps/v8/src/typedarray.js3
-rw-r--r--deps/v8/src/types-inl.h14
-rw-r--r--deps/v8/src/types.cc4
-rw-r--r--deps/v8/src/types.h8
-rw-r--r--deps/v8/src/typing.cc8
-rw-r--r--deps/v8/src/typing.h2
-rw-r--r--deps/v8/src/unique.h2
-rw-r--r--deps/v8/src/uri.js17
-rw-r--r--deps/v8/src/utils.cc5
-rw-r--r--deps/v8/src/v8.cc4
-rw-r--r--deps/v8/src/v8.h37
-rw-r--r--deps/v8/src/v8natives.js110
-rw-r--r--deps/v8/src/v8threads.cc4
-rw-r--r--deps/v8/src/variables.cc8
-rw-r--r--deps/v8/src/variables.h1
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h74
-rw-r--r--deps/v8/src/x64/assembler-x64.h27
-rw-r--r--deps/v8/src/x64/builtins-x64.cc415
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc317
-rw-r--r--deps/v8/src/x64/codegen-x64.cc2
-rw-r--r--deps/v8/src/x64/codegen-x64.h3
-rw-r--r--deps/v8/src/x64/cpu-x64.cc2
-rw-r--r--deps/v8/src/x64/debug-x64.cc263
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc4
-rw-r--r--deps/v8/src/x64/disasm-x64.cc2
-rw-r--r--deps/v8/src/x64/frames-x64.cc2
-rw-r--r--deps/v8/src/x64/frames-x64.h6
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc43
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.cc137
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.h1
-rw-r--r--deps/v8/src/x64/lithium-gap-resolver-x64.cc2
-rw-r--r--deps/v8/src/x64/lithium-gap-resolver-x64.h2
-rw-r--r--deps/v8/src/x64/lithium-x64.cc58
-rw-r--r--deps/v8/src/x64/lithium-x64.h60
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc76
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h60
-rw-r--r--deps/v8/src/x87/assembler-x87-inl.h74
-rw-r--r--deps/v8/src/x87/assembler-x87.cc9
-rw-r--r--deps/v8/src/x87/assembler-x87.h22
-rw-r--r--deps/v8/src/x87/builtins-x87.cc428
-rw-r--r--deps/v8/src/x87/code-stubs-x87.cc357
-rw-r--r--deps/v8/src/x87/codegen-x87.cc2
-rw-r--r--deps/v8/src/x87/cpu-x87.cc2
-rw-r--r--deps/v8/src/x87/debug-x87.cc282
-rw-r--r--deps/v8/src/x87/deoptimizer-x87.cc5
-rw-r--r--deps/v8/src/x87/disasm-x87.cc13
-rw-r--r--deps/v8/src/x87/frames-x87.cc2
-rw-r--r--deps/v8/src/x87/frames-x87.h6
-rw-r--r--deps/v8/src/x87/interface-descriptors-x87.cc53
-rw-r--r--deps/v8/src/x87/lithium-codegen-x87.cc136
-rw-r--r--deps/v8/src/x87/lithium-gap-resolver-x87.cc2
-rw-r--r--deps/v8/src/x87/lithium-gap-resolver-x87.h2
-rw-r--r--deps/v8/src/x87/lithium-x87.cc55
-rw-r--r--deps/v8/src/x87/lithium-x87.h60
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.cc36
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.h26
-rw-r--r--deps/v8/test/benchmarks/testcfg.py22
-rw-r--r--deps/v8/test/cctest/OWNERS7
-rw-r--r--deps/v8/test/cctest/cctest.cc13
-rw-r--r--deps/v8/test/cctest/cctest.gyp11
-rw-r--r--deps/v8/test/cctest/cctest.h35
-rw-r--r--deps/v8/test/cctest/cctest.status9
-rw-r--r--deps/v8/test/cctest/compiler/c-signature.h4
-rw-r--r--deps/v8/test/cctest/compiler/call-tester.h15
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.cc4
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h6
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.h19
-rw-r--r--deps/v8/test/cctest/compiler/graph-builder-tester.h222
-rw-r--r--deps/v8/test/cctest/compiler/instruction-selector-tester.h127
-rw-r--r--deps/v8/test/cctest/compiler/simplified-graph-builder.cc87
-rw-r--r--deps/v8/test/cctest/compiler/simplified-graph-builder.h151
-rw-r--r--deps/v8/test/cctest/compiler/test-basic-block-profiler.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-branch-combine.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-changes-lowering.cc123
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-operator.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-pipeline.cc5
-rw-r--r--deps/v8/test/cctest/compiler/test-run-deopt.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-run-inlining.cc5
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jscalls.cc14
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc15
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc985
-rw-r--r--deps/v8/test/cctest/compiler/test-run-properties.cc18
-rw-r--r--deps/v8/test/cctest/compiler/test-run-stubs.cc19
-rw-r--r--deps/v8/test/cctest/compiler/test-simplified-lowering.cc66
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc129
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc208
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc1
-rw-r--r--deps/v8/test/cctest/test-api.cc442
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc55
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc375
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc431
-rw-r--r--deps/v8/test/cctest/test-compiler.cc88
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc4
-rw-r--r--deps/v8/test/cctest/test-debug.cc249
-rw-r--r--deps/v8/test/cctest/test-deoptimization.cc2
-rw-r--r--deps/v8/test/cctest/test-dictionary.cc2
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc2
-rw-r--r--deps/v8/test/cctest/test-disasm-arm64.cc19
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc5
-rw-r--r--deps/v8/test/cctest/test-disasm-mips.cc39
-rw-r--r--deps/v8/test/cctest/test-disasm-mips64.cc39
-rw-r--r--deps/v8/test/cctest/test-disasm-ppc.cc4
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc4
-rw-r--r--deps/v8/test/cctest/test-disasm-x87.cc5
-rw-r--r--deps/v8/test/cctest/test-extra.js8
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc102
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc45
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc17
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc50
-rw-r--r--deps/v8/test/cctest/test-heap.cc590
-rw-r--r--deps/v8/test/cctest/test-liveedit.cc2
-rw-r--r--deps/v8/test/cctest/test-lockers.cc7
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc68
-rw-r--r--deps/v8/test/cctest/test-mark-compact.cc4
-rw-r--r--deps/v8/test/cctest/test-mementos.cc1
-rw-r--r--deps/v8/test/cctest/test-migrations.cc1
-rw-r--r--deps/v8/test/cctest/test-object-observe.cc6
-rw-r--r--deps/v8/test/cctest/test-parsing.cc150
-rw-r--r--deps/v8/test/cctest/test-regexp.cc27
-rw-r--r--deps/v8/test/cctest/test-reloc-info.cc2
-rw-r--r--deps/v8/test/cctest/test-serialize.cc8
-rw-r--r--deps/v8/test/cctest/test-simd.cc117
-rw-r--r--deps/v8/test/cctest/test-spaces.cc5
-rw-r--r--deps/v8/test/cctest/test-strings.cc9
-rw-r--r--deps/v8/test/cctest/test-threads.cc82
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc17
-rwxr-xr-xdeps/v8/test/intl/number-format/check-minimum-fraction-digits.js9
-rwxr-xr-xdeps/v8/test/intl/number-format/format-currency.js19
-rw-r--r--deps/v8/test/intl/string/normalization.js2
-rw-r--r--deps/v8/test/message/arrow-formal-parameters.js7
-rw-r--r--deps/v8/test/message/arrow-formal-parameters.out4
-rw-r--r--deps/v8/test/message/for-loop-invalid-lhs.js9
-rw-r--r--deps/v8/test/message/for-loop-invalid-lhs.out4
-rw-r--r--deps/v8/test/message/new-target-assignment.js7
-rw-r--r--deps/v8/test/message/new-target-assignment.out4
-rw-r--r--deps/v8/test/message/new-target-for-loop.js7
-rw-r--r--deps/v8/test/message/new-target-for-loop.out4
-rw-r--r--deps/v8/test/message/new-target-postfix-op.js7
-rw-r--r--deps/v8/test/message/new-target-postfix-op.out4
-rw-r--r--deps/v8/test/message/new-target-prefix-op.js7
-rw-r--r--deps/v8/test/message/new-target-prefix-op.out4
-rw-r--r--deps/v8/test/message/strict-formal-parameters.out4
-rw-r--r--deps/v8/test/mjsunit/array-functions-prototype-misc.js72
-rw-r--r--deps/v8/test/mjsunit/array-push7.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/string-length.js31
-rw-r--r--deps/v8/test/mjsunit/compiler/stubs/floor-stub.js39
-rw-r--r--deps/v8/test/mjsunit/d8-worker-sharedarraybuffer.js100
-rw-r--r--deps/v8/test/mjsunit/date-parse.js12
-rw-r--r--deps/v8/test/mjsunit/date.js108
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate.js1
-rw-r--r--deps/v8/test/mjsunit/debug-function-scopes.js2
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-restart-frame.js5
-rw-r--r--deps/v8/test/mjsunit/debug-materialized.js41
-rw-r--r--deps/v8/test/mjsunit/debug-mirror-cache.js6
-rw-r--r--deps/v8/test/mjsunit/debug-optimize.js54
-rw-r--r--deps/v8/test/mjsunit/debug-return-value.js1
-rw-r--r--deps/v8/test/mjsunit/debug-script-breakpoints.js98
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-construct-call.js42
-rw-r--r--deps/v8/test/mjsunit/element-accessor.js33
-rw-r--r--deps/v8/test/mjsunit/element-read-only.js154
-rw-r--r--deps/v8/test/mjsunit/elements-kind.js55
-rw-r--r--deps/v8/test/mjsunit/error-constructors.js52
-rw-r--r--deps/v8/test/mjsunit/es6/array-iterator.js3
-rw-r--r--deps/v8/test/mjsunit/es6/array-reverse-order.js10
-rw-r--r--deps/v8/test/mjsunit/es6/block-conflicts.js20
-rw-r--r--deps/v8/test/mjsunit/es6/block-const-assign.js2
-rw-r--r--deps/v8/test/mjsunit/es6/block-leave.js2
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-declaration.js17
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-semantics.js35
-rw-r--r--deps/v8/test/mjsunit/es6/block-scoping.js47
-rw-r--r--deps/v8/test/mjsunit/es6/class-computed-property-names-super.js (renamed from deps/v8/test/mjsunit/harmony/class-computed-property-names-super.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/computed-property-names-classes.js (renamed from deps/v8/test/mjsunit/harmony/computed-property-names-classes.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/computed-property-names-deopt.js (renamed from deps/v8/test/mjsunit/harmony/computed-property-names-deopt.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/computed-property-names-object-literals-methods.js (renamed from deps/v8/test/mjsunit/harmony/computed-property-names-object-literals-methods.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/computed-property-names-super.js (renamed from deps/v8/test/mjsunit/harmony/computed-property-names-super.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/computed-property-names.js (renamed from deps/v8/test/mjsunit/harmony/computed-property-names.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-blockscopes.js18
-rw-r--r--deps/v8/test/mjsunit/es6/debug-function-scopes.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/events.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-late.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js5
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js1
-rw-r--r--deps/v8/test/mjsunit/es6/microtask-delivery.js1
-rw-r--r--deps/v8/test/mjsunit/es6/promises.js23
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-3750.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-cr512574.js9
-rw-r--r--deps/v8/test/mjsunit/es6/templates.js2
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-set-length.js54
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray.js11
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-escapes.js (renamed from deps/v8/test/mjsunit/harmony/unicode-escapes.js)2
-rw-r--r--deps/v8/test/mjsunit/es7/object-observe-debug-event.js1
-rw-r--r--deps/v8/test/mjsunit/es7/object-observe-runtime.js1
-rw-r--r--deps/v8/test/mjsunit/es7/object-observe.js2
-rw-r--r--deps/v8/test/mjsunit/function-bind.js19
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics.js67
-rw-r--r--deps/v8/test/mjsunit/harmony/block-conflicts-sloppy.js179
-rw-r--r--deps/v8/test/mjsunit/harmony/block-const-assign-sloppy.js158
-rw-r--r--deps/v8/test/mjsunit/harmony/block-for-sloppy.js199
-rw-r--r--deps/v8/test/mjsunit/harmony/block-leave-sloppy.js224
-rw-r--r--deps/v8/test/mjsunit/harmony/block-let-crankshaft-sloppy.js483
-rw-r--r--deps/v8/test/mjsunit/harmony/block-let-declaration-sloppy.js174
-rw-r--r--deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js192
-rw-r--r--deps/v8/test/mjsunit/harmony/block-scope-class.js59
-rw-r--r--deps/v8/test/mjsunit/harmony/block-scoping-sloppy.js310
-rw-r--r--deps/v8/test/mjsunit/harmony/block-scoping-top-level-sloppy.js34
-rw-r--r--deps/v8/test/mjsunit/harmony/default-parameters-debug.js58
-rw-r--r--deps/v8/test/mjsunit/harmony/default-parameters.js251
-rw-r--r--deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount-nolazy.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/destructuring.js223
-rw-r--r--deps/v8/test/mjsunit/harmony/futex.js274
-rw-r--r--deps/v8/test/mjsunit/harmony/new-target.js30
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-405844.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4298.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4417.js12
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-513474.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-517455.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-451770.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-observe-empty-double-array.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/simd.js560
-rw-r--r--deps/v8/test/mjsunit/harmony/super.js93
-rw-r--r--deps/v8/test/mjsunit/harmony/typed-array-includes.js203
-rw-r--r--deps/v8/test/mjsunit/invalid-lhs.js6
-rw-r--r--deps/v8/test/mjsunit/messages.js14
-rw-r--r--deps/v8/test/mjsunit/migrations.js1
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status21
-rw-r--r--deps/v8/test/mjsunit/opt-elements-kind.js65
-rw-r--r--deps/v8/test/mjsunit/osr-elements-kind.js65
-rw-r--r--deps/v8/test/mjsunit/primitive-keyed-access.js49
-rw-r--r--deps/v8/test/mjsunit/regress-4399.js8
-rw-r--r--deps/v8/test/mjsunit/regress/cross-script-vars.js575
-rw-r--r--deps/v8/test/mjsunit/regress/regress-119429.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3315.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-356589.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-417709a.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4271.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4279.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4296.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4309-1.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4309-2.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4309-3.js39
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4320.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4325.js48
-rw-r--r--deps/v8/test/mjsunit/regress/regress-455207.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-509961.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-514362.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-581.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-490021.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-501711.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-501809.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-505007-1.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-505007-2.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-505907.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-506549.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-506956.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-507070.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-510426.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-510738.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-511880.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-513472.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-513507.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-514081.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-516592.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-516775.js53
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-517592.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-518747.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-518748.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-522380.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-522496.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-523308.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-debugger-redirect.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-observe-map-cache.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-typedarray-length.js37
-rw-r--r--deps/v8/test/mjsunit/regress/string-compare-memcmp.js2
-rw-r--r--deps/v8/test/mjsunit/samevalue.js148
-rw-r--r--deps/v8/test/mjsunit/string-normalize.js8
-rw-r--r--deps/v8/test/mjsunit/strong/class-extend-null.js97
-rw-r--r--deps/v8/test/mjsunit/strong/class-object-frozen.js98
-rw-r--r--deps/v8/test/mjsunit/strong/declaration-after-use.js1
-rw-r--r--deps/v8/test/mjsunit/strong/destructuring.js25
-rw-r--r--deps/v8/test/mjsunit/strong/literals.js4
-rw-r--r--deps/v8/test/mjsunit/unbox-double-arrays.js11
-rw-r--r--deps/v8/test/preparser/testcfg.py4
-rw-r--r--deps/v8/test/promises-aplus/lib/global.js13
-rw-r--r--deps/v8/test/promises-aplus/lib/mocha.js17
-rw-r--r--deps/v8/test/promises-aplus/testcfg.py2
-rw-r--r--deps/v8/test/simdjs/SimdJs.json76
-rw-r--r--deps/v8/test/simdjs/harness-adapt.js10
-rw-r--r--deps/v8/test/simdjs/simdjs.status6
-rw-r--r--deps/v8/test/simdjs/testcfg.py48
-rw-r--r--deps/v8/test/test262-es6/test262-es6.status609
-rw-r--r--deps/v8/test/test262-es6/testcfg.py64
-rw-r--r--deps/v8/test/test262/test262.status3
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc34
-rw-r--r--deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc309
-rw-r--r--deps/v8/test/unittests/compiler/compiler-test-utils.h16
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.h41
-rw-r--r--deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/instruction-sequence-unittest.h6
-rw-r--r--deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc262
-rw-r--r--deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h56
-rw-r--r--deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc306
-rw-r--r--deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc41
-rw-r--r--deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc324
-rw-r--r--deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc49
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc103
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h37
-rw-r--r--deps/v8/test/unittests/compiler/ppc/OWNERS1
-rw-r--r--deps/v8/test/unittests/compiler/register-allocator-unittest.cc207
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-unittest.cc28
-rw-r--r--deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc42
-rw-r--r--deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc13
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc137
-rw-r--r--deps/v8/test/unittests/test-utils.cc9
-rw-r--r--deps/v8/test/unittests/unittests.gyp15
-rw-r--r--deps/v8/test/webkit/class-syntax-name-expected.txt4
-rw-r--r--deps/v8/test/webkit/class-syntax-name.js2
-rw-r--r--deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt94
-rw-r--r--deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames.js135
-rw-r--r--deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt4
-rw-r--r--deps/v8/test/webkit/fast/js/excessive-comma-usage-expected.txt2
-rw-r--r--deps/v8/test/webkit/fast/js/excessive-comma-usage.js6
-rw-r--r--deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt56
-rw-r--r--deps/v8/test/webkit/fast/js/parser-syntax-check.js14
-rw-r--r--deps/v8/test/webkit/fast/js/primitive-property-access-edge-cases-expected.txt18
-rw-r--r--deps/v8/test/webkit/function-apply-aliased-expected.txt2
-rw-r--r--deps/v8/test/webkit/function-apply-aliased.js3
-rwxr-xr-xdeps/v8/tools/check-inline-includes.sh19
-rwxr-xr-xdeps/v8/tools/check-unused-bailouts.sh16
-rw-r--r--deps/v8/tools/external-reference-check.py1
-rw-r--r--deps/v8/tools/gdb-v8-support.py1
-rw-r--r--deps/v8/tools/gdbinit9
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py14
-rw-r--r--deps/v8/tools/gyp/v8.gyp200
-rwxr-xr-xdeps/v8/tools/js2c.py6
-rwxr-xr-xdeps/v8/tools/run-deopt-fuzzer.py4
-rwxr-xr-xdeps/v8/tools/run-tests.py44
-rwxr-xr-xdeps/v8/tools/run_perf.py518
-rw-r--r--deps/v8/tools/testrunner/local/execution.py2
-rw-r--r--deps/v8/tools/testrunner/local/progress.py8
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py100
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py17
-rw-r--r--deps/v8/tools/unittests/run_perf_test.py53
-rw-r--r--deps/v8/tools/v8heapconst.py396
-rw-r--r--deps/v8/tools/whitespace.txt2
1058 files changed, 47021 insertions, 33230 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index d0a859c12d..cc5606e854 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -60,6 +60,7 @@ shell_g
/test/promises-aplus/promises-tests
/test/promises-aplus/promises-tests.tar.gz
/test/promises-aplus/sinon
+/test/simdjs/CHECKED_OUT_*
/test/simdjs/ecmascript_simd*
/test/simdjs/data*
/test/test262/data
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 4f853c275b..72c23bcc83 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -44,6 +44,7 @@ Bert Belder <bertbelder@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Caitlin Potter <caitpotter88@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
+Chris Nardi <hichris123@gmail.com>
Christopher A. Taylor <chris@gameclosure.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 3eb7fc73eb..fae41a7361 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -203,8 +203,8 @@ action("js2c") {
sources = [
"src/macros.py",
"src/messages.h",
- "src/runtime.js",
"src/prologue.js",
+ "src/runtime.js",
"src/v8natives.js",
"src/symbol.js",
"src/array.js",
@@ -227,12 +227,12 @@ action("js2c") {
"src/json.js",
"src/array-iterator.js",
"src/string-iterator.js",
- "src/debug-debugger.js",
- "src/mirror-debugger.js",
- "src/liveedit-debugger.js",
"src/templates.js",
"src/harmony-array.js",
"src/harmony-typedarray.js",
+ "src/debug/mirrors.js",
+ "src/debug/debug.js",
+ "src/debug/liveedit.js",
]
outputs = [
@@ -257,6 +257,40 @@ action("js2c") {
}
}
+action("js2c_code_stubs") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ script = "tools/js2c.py"
+
+ # The script depends on this other script, this rule causes a rebuild if it
+ # changes.
+ inputs = [ "tools/jsmin.py" ]
+
+ sources = [
+ "src/macros.py",
+ "src/messages.h",
+ "src/code-stubs.js"
+ ]
+
+ outputs = [
+ "$target_gen_dir/code-stub-libraries.cc",
+ ]
+
+ args = [
+ rebase_path("$target_gen_dir/code-stub-libraries.cc",
+ root_build_dir),
+ "CODE_STUB",
+ ] + rebase_path(sources, root_build_dir)
+
+ if (v8_use_external_startup_data) {
+ outputs += [ "$target_gen_dir/libraries_code_stub.bin" ]
+ args += [
+ "--startup_blob",
+ rebase_path("$target_gen_dir/libraries_code_stub.bin", root_build_dir),
+ ]
+ }
+}
+
action("js2c_experimental") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -279,7 +313,9 @@ action("js2c_experimental") {
"src/harmony-reflect.js",
"src/harmony-spread.js",
"src/harmony-object.js",
- "src/harmony-sharedarraybuffer.js"
+ "src/harmony-object-observe.js",
+ "src/harmony-sharedarraybuffer.js",
+ "src/harmony-simd.js"
]
outputs = [
@@ -355,12 +391,14 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
+ ":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
]
sources = [
"$target_gen_dir/libraries.bin",
+ "$target_gen_dir/libraries_code_stub.bin",
"$target_gen_dir/libraries_experimental.bin",
"$target_gen_dir/libraries_extras.bin",
]
@@ -446,6 +484,7 @@ source_set("v8_nosnapshot") {
deps = [
":js2c",
+ ":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":v8_base",
@@ -453,6 +492,7 @@ source_set("v8_nosnapshot") {
sources = [
"$target_gen_dir/libraries.cc",
+ "$target_gen_dir/code-stub-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"src/snapshot/snapshot-empty.cc",
@@ -477,6 +517,7 @@ source_set("v8_snapshot") {
deps = [
":js2c",
+ ":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":v8_base",
@@ -489,6 +530,7 @@ source_set("v8_snapshot") {
sources = [
"$target_gen_dir/libraries.cc",
+ "$target_gen_dir/code-stub-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/snapshot.cc",
@@ -509,6 +551,7 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
+ ":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":v8_base",
@@ -587,13 +630,13 @@ source_set("v8_base") {
"src/bootstrapper.h",
"src/builtins.cc",
"src/builtins.h",
- "src/bytecodes-irregexp.h",
+ "src/cancelable-task.cc",
+ "src/cancelable-task.h",
"src/cached-powers.cc",
"src/cached-powers.h",
"src/char-predicates.cc",
"src/char-predicates-inl.h",
"src/char-predicates.h",
- "src/checks.cc",
"src/checks.h",
"src/circular-queue-inl.h",
"src/circular-queue.h",
@@ -623,6 +666,7 @@ source_set("v8_base") {
"src/compiler/basic-block-instrumentor.h",
"src/compiler/change-lowering.cc",
"src/compiler/change-lowering.h",
+ "src/compiler/c-linkage.cc",
"src/compiler/coalesced-live-ranges.cc",
"src/compiler/coalesced-live-ranges.h",
"src/compiler/code-generator-impl.h",
@@ -643,6 +687,7 @@ source_set("v8_base") {
"src/compiler/dead-code-elimination.cc",
"src/compiler/dead-code-elimination.h",
"src/compiler/diamond.h",
+ "src/compiler/frame.cc",
"src/compiler/frame.h",
"src/compiler/frame-elider.cc",
"src/compiler/frame-elider.h",
@@ -650,7 +695,6 @@ source_set("v8_base") {
"src/compiler/frame-states.h",
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
- "src/compiler/graph-builder.h",
"src/compiler/graph-reducer.cc",
"src/compiler/graph-reducer.h",
"src/compiler/graph-replay.cc",
@@ -669,8 +713,12 @@ source_set("v8_base") {
"src/compiler/instruction-selector.h",
"src/compiler/instruction.cc",
"src/compiler/instruction.h",
+ "src/compiler/interpreter-assembler.cc",
+ "src/compiler/interpreter-assembler.h",
"src/compiler/js-builtin-reducer.cc",
"src/compiler/js-builtin-reducer.h",
+ "src/compiler/js-context-relaxation.cc",
+ "src/compiler/js-context-relaxation.h",
"src/compiler/js-context-specialization.cc",
"src/compiler/js-context-specialization.h",
"src/compiler/js-frame-specialization.cc",
@@ -687,11 +735,12 @@ source_set("v8_base") {
"src/compiler/js-operator.h",
"src/compiler/js-type-feedback.cc",
"src/compiler/js-type-feedback.h",
+ "src/compiler/js-type-feedback-lowering.cc",
+ "src/compiler/js-type-feedback-lowering.h",
"src/compiler/js-typed-lowering.cc",
"src/compiler/js-typed-lowering.h",
"src/compiler/jump-threading.cc",
"src/compiler/jump-threading.h",
- "src/compiler/linkage-impl.h",
"src/compiler/linkage.cc",
"src/compiler/linkage.h",
"src/compiler/liveness-analyzer.cc",
@@ -732,6 +781,8 @@ source_set("v8_base") {
"src/compiler/pipeline.h",
"src/compiler/pipeline-statistics.cc",
"src/compiler/pipeline-statistics.h",
+ "src/compiler/preprocess-live-ranges.cc",
+ "src/compiler/preprocess-live-ranges.h",
"src/compiler/raw-machine-assembler.cc",
"src/compiler/raw-machine-assembler.h",
"src/compiler/register-allocator.cc",
@@ -769,6 +820,8 @@ source_set("v8_base") {
"src/compiler/zone-pool.h",
"src/compiler.cc",
"src/compiler.h",
+ "src/context-measure.cc",
+ "src/context-measure.h",
"src/contexts.cc",
"src/contexts.h",
"src/conversions-inl.h",
@@ -784,8 +837,16 @@ source_set("v8_base") {
"src/dateparser-inl.h",
"src/dateparser.cc",
"src/dateparser.h",
- "src/debug.cc",
- "src/debug.h",
+ "src/debug/debug-evaluate.cc",
+ "src/debug/debug-evaluate.h",
+ "src/debug/debug-frames.cc",
+ "src/debug/debug-frames.h",
+ "src/debug/debug-scopes.cc",
+ "src/debug/debug-scopes.h",
+ "src/debug/debug.cc",
+ "src/debug/debug.h",
+ "src/debug/liveedit.cc",
+ "src/debug/liveedit.h",
"src/deoptimizer.cc",
"src/deoptimizer.h",
"src/disasm.h",
@@ -828,10 +889,12 @@ source_set("v8_base") {
"src/frames-inl.h",
"src/frames.cc",
"src/frames.h",
- "src/full-codegen.cc",
- "src/full-codegen.h",
+ "src/full-codegen/full-codegen.cc",
+ "src/full-codegen/full-codegen.h",
"src/func-name-inferrer.cc",
"src/func-name-inferrer.h",
+ "src/futex-emulation.cc",
+ "src/futex-emulation.h",
"src/gdb-jit.cc",
"src/gdb-jit.h",
"src/global-handles.cc",
@@ -944,15 +1007,18 @@ source_set("v8_base") {
"src/ic/stub-cache.h",
"src/interface-descriptors.cc",
"src/interface-descriptors.h",
- "src/interpreter-irregexp.cc",
- "src/interpreter-irregexp.h",
+ "src/interpreter/bytecodes.cc",
+ "src/interpreter/bytecodes.h",
+ "src/interpreter/bytecode-array-builder.cc",
+ "src/interpreter/bytecode-array-builder.h",
+ "src/interpreter/bytecode-generator.cc",
+ "src/interpreter/bytecode-generator.h",
+ "src/interpreter/interpreter.cc",
+ "src/interpreter/interpreter.h",
"src/isolate.cc",
"src/isolate.h",
"src/json-parser.h",
"src/json-stringifier.h",
- "src/jsregexp-inl.h",
- "src/jsregexp.cc",
- "src/jsregexp.h",
"src/layout-descriptor-inl.h",
"src/layout-descriptor.cc",
"src/layout-descriptor.h",
@@ -965,8 +1031,6 @@ source_set("v8_base") {
"src/lithium-codegen.h",
"src/lithium.cc",
"src/lithium.h",
- "src/liveedit.cc",
- "src/liveedit.h",
"src/log-inl.h",
"src/log-utils.cc",
"src/log-utils.h",
@@ -1009,17 +1073,23 @@ source_set("v8_base") {
"src/property.cc",
"src/property.h",
"src/prototype.h",
- "src/regexp-macro-assembler-irregexp-inl.h",
- "src/regexp-macro-assembler-irregexp.cc",
- "src/regexp-macro-assembler-irregexp.h",
- "src/regexp-macro-assembler-tracer.cc",
- "src/regexp-macro-assembler-tracer.h",
- "src/regexp-macro-assembler.cc",
- "src/regexp-macro-assembler.h",
- "src/regexp-stack.cc",
- "src/regexp-stack.h",
"src/rewriter.cc",
"src/rewriter.h",
+ "src/regexp/bytecodes-irregexp.h",
+ "src/regexp/interpreter-irregexp.cc",
+ "src/regexp/interpreter-irregexp.h",
+ "src/regexp/jsregexp-inl.h",
+ "src/regexp/jsregexp.cc",
+ "src/regexp/jsregexp.h",
+ "src/regexp/regexp-macro-assembler-irregexp-inl.h",
+ "src/regexp/regexp-macro-assembler-irregexp.cc",
+ "src/regexp/regexp-macro-assembler-irregexp.h",
+ "src/regexp/regexp-macro-assembler-tracer.cc",
+ "src/regexp/regexp-macro-assembler-tracer.h",
+ "src/regexp/regexp-macro-assembler.cc",
+ "src/regexp/regexp-macro-assembler.h",
+ "src/regexp/regexp-stack.cc",
+ "src/regexp/regexp-stack.h",
"src/runtime-profiler.cc",
"src/runtime-profiler.h",
"src/runtime/runtime-array.cc",
@@ -1031,6 +1101,7 @@ source_set("v8_base") {
"src/runtime/runtime-debug.cc",
"src/runtime/runtime-forin.cc",
"src/runtime/runtime-function.cc",
+ "src/runtime/runtime-futex.cc",
"src/runtime/runtime-generator.cc",
"src/runtime/runtime-i18n.cc",
"src/runtime/runtime-internal.cc",
@@ -1044,6 +1115,7 @@ source_set("v8_base") {
"src/runtime/runtime-proxy.cc",
"src/runtime/runtime-regexp.cc",
"src/runtime/runtime-scopes.cc",
+ "src/runtime/runtime-simd.cc",
"src/runtime/runtime-strings.cc",
"src/runtime/runtime-symbol.cc",
"src/runtime/runtime-test.cc",
@@ -1067,8 +1139,8 @@ source_set("v8_base") {
"src/signature.h",
"src/simulator.h",
"src/small-pointer-list.h",
- "src/smart-pointers.h",
"src/snapshot/natives.h",
+ "src/snapshot/natives-common.cc",
"src/snapshot/serialize.cc",
"src/snapshot/serialize.h",
"src/snapshot/snapshot-common.cc",
@@ -1081,7 +1153,6 @@ source_set("v8_base") {
"src/startup-data-util.cc",
"src/string-builder.cc",
"src/string-builder.h",
- "src/string-search.cc",
"src/string-search.h",
"src/string-stream.cc",
"src/string-stream.h",
@@ -1144,12 +1215,10 @@ source_set("v8_base") {
"src/ia32/codegen-ia32.cc",
"src/ia32/codegen-ia32.h",
"src/ia32/cpu-ia32.cc",
- "src/ia32/debug-ia32.cc",
"src/ia32/deoptimizer-ia32.cc",
"src/ia32/disasm-ia32.cc",
"src/ia32/frames-ia32.cc",
"src/ia32/frames-ia32.h",
- "src/ia32/full-codegen-ia32.cc",
"src/ia32/interface-descriptors-ia32.cc",
"src/ia32/lithium-codegen-ia32.cc",
"src/ia32/lithium-codegen-ia32.h",
@@ -1159,17 +1228,18 @@ source_set("v8_base") {
"src/ia32/lithium-ia32.h",
"src/ia32/macro-assembler-ia32.cc",
"src/ia32/macro-assembler-ia32.h",
- "src/ia32/regexp-macro-assembler-ia32.cc",
- "src/ia32/regexp-macro-assembler-ia32.h",
"src/compiler/ia32/code-generator-ia32.cc",
"src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-selector-ia32.cc",
- "src/compiler/ia32/linkage-ia32.cc",
+ "src/debug/ia32/debug-ia32.cc",
+ "src/full-codegen/ia32/full-codegen-ia32.cc",
"src/ic/ia32/access-compiler-ia32.cc",
"src/ic/ia32/handler-compiler-ia32.cc",
"src/ic/ia32/ic-ia32.cc",
"src/ic/ia32/ic-compiler-ia32.cc",
"src/ic/ia32/stub-cache-ia32.cc",
+ "src/regexp/ia32/regexp-macro-assembler-ia32.cc",
+ "src/regexp/ia32/regexp-macro-assembler-ia32.h",
]
} else if (v8_target_arch == "x64") {
sources += [
@@ -1182,12 +1252,10 @@ source_set("v8_base") {
"src/x64/codegen-x64.cc",
"src/x64/codegen-x64.h",
"src/x64/cpu-x64.cc",
- "src/x64/debug-x64.cc",
"src/x64/deoptimizer-x64.cc",
"src/x64/disasm-x64.cc",
"src/x64/frames-x64.cc",
"src/x64/frames-x64.h",
- "src/x64/full-codegen-x64.cc",
"src/x64/interface-descriptors-x64.cc",
"src/x64/lithium-codegen-x64.cc",
"src/x64/lithium-codegen-x64.h",
@@ -1197,17 +1265,18 @@ source_set("v8_base") {
"src/x64/lithium-x64.h",
"src/x64/macro-assembler-x64.cc",
"src/x64/macro-assembler-x64.h",
- "src/x64/regexp-macro-assembler-x64.cc",
- "src/x64/regexp-macro-assembler-x64.h",
"src/compiler/x64/code-generator-x64.cc",
"src/compiler/x64/instruction-codes-x64.h",
"src/compiler/x64/instruction-selector-x64.cc",
- "src/compiler/x64/linkage-x64.cc",
+ "src/debug/x64/debug-x64.cc",
+ "src/full-codegen/x64/full-codegen-x64.cc",
"src/ic/x64/access-compiler-x64.cc",
"src/ic/x64/handler-compiler-x64.cc",
"src/ic/x64/ic-x64.cc",
"src/ic/x64/ic-compiler-x64.cc",
"src/ic/x64/stub-cache-x64.cc",
+ "src/regexp/x64/regexp-macro-assembler-x64.cc",
+ "src/regexp/x64/regexp-macro-assembler-x64.h",
]
} else if (v8_target_arch == "arm") {
sources += [
@@ -1222,12 +1291,10 @@ source_set("v8_base") {
"src/arm/constants-arm.h",
"src/arm/constants-arm.cc",
"src/arm/cpu-arm.cc",
- "src/arm/debug-arm.cc",
"src/arm/deoptimizer-arm.cc",
"src/arm/disasm-arm.cc",
"src/arm/frames-arm.cc",
"src/arm/frames-arm.h",
- "src/arm/full-codegen-arm.cc",
"src/arm/interface-descriptors-arm.cc",
"src/arm/interface-descriptors-arm.h",
"src/arm/lithium-arm.cc",
@@ -1238,19 +1305,20 @@ source_set("v8_base") {
"src/arm/lithium-gap-resolver-arm.h",
"src/arm/macro-assembler-arm.cc",
"src/arm/macro-assembler-arm.h",
- "src/arm/regexp-macro-assembler-arm.cc",
- "src/arm/regexp-macro-assembler-arm.h",
"src/arm/simulator-arm.cc",
"src/arm/simulator-arm.h",
"src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h",
"src/compiler/arm/instruction-selector-arm.cc",
- "src/compiler/arm/linkage-arm.cc",
+ "src/debug/arm/debug-arm.cc",
+ "src/full-codegen/arm/full-codegen-arm.cc",
"src/ic/arm/access-compiler-arm.cc",
"src/ic/arm/handler-compiler-arm.cc",
"src/ic/arm/ic-arm.cc",
"src/ic/arm/ic-compiler-arm.cc",
"src/ic/arm/stub-cache-arm.cc",
+ "src/regexp/arm/regexp-macro-assembler-arm.cc",
+ "src/regexp/arm/regexp-macro-assembler-arm.h",
]
} else if (v8_target_arch == "arm64") {
sources += [
@@ -1264,7 +1332,6 @@ source_set("v8_base") {
"src/arm64/code-stubs-arm64.h",
"src/arm64/constants-arm64.h",
"src/arm64/cpu-arm64.cc",
- "src/arm64/debug-arm64.cc",
"src/arm64/decoder-arm64.cc",
"src/arm64/decoder-arm64.h",
"src/arm64/decoder-arm64-inl.h",
@@ -1273,7 +1340,6 @@ source_set("v8_base") {
"src/arm64/disasm-arm64.h",
"src/arm64/frames-arm64.cc",
"src/arm64/frames-arm64.h",
- "src/arm64/full-codegen-arm64.cc",
"src/arm64/instructions-arm64.cc",
"src/arm64/instructions-arm64.h",
"src/arm64/instrument-arm64.cc",
@@ -1289,8 +1355,6 @@ source_set("v8_base") {
"src/arm64/macro-assembler-arm64.cc",
"src/arm64/macro-assembler-arm64.h",
"src/arm64/macro-assembler-arm64-inl.h",
- "src/arm64/regexp-macro-assembler-arm64.cc",
- "src/arm64/regexp-macro-assembler-arm64.h",
"src/arm64/simulator-arm64.cc",
"src/arm64/simulator-arm64.h",
"src/arm64/utils-arm64.cc",
@@ -1298,12 +1362,15 @@ source_set("v8_base") {
"src/compiler/arm64/code-generator-arm64.cc",
"src/compiler/arm64/instruction-codes-arm64.h",
"src/compiler/arm64/instruction-selector-arm64.cc",
- "src/compiler/arm64/linkage-arm64.cc",
+ "src/debug/arm64/debug-arm64.cc",
+ "src/full-codegen/arm64/full-codegen-arm64.cc",
"src/ic/arm64/access-compiler-arm64.cc",
"src/ic/arm64/handler-compiler-arm64.cc",
"src/ic/arm64/ic-arm64.cc",
"src/ic/arm64/ic-compiler-arm64.cc",
"src/ic/arm64/stub-cache-arm64.cc",
+ "src/regexp/arm64/regexp-macro-assembler-arm64.cc",
+ "src/regexp/arm64/regexp-macro-assembler-arm64.h",
]
} else if (v8_target_arch == "mipsel") {
sources += [
@@ -1318,12 +1385,10 @@ source_set("v8_base") {
"src/mips/constants-mips.cc",
"src/mips/constants-mips.h",
"src/mips/cpu-mips.cc",
- "src/mips/debug-mips.cc",
"src/mips/deoptimizer-mips.cc",
"src/mips/disasm-mips.cc",
"src/mips/frames-mips.cc",
"src/mips/frames-mips.h",
- "src/mips/full-codegen-mips.cc",
"src/mips/interface-descriptors-mips.cc",
"src/mips/lithium-codegen-mips.cc",
"src/mips/lithium-codegen-mips.h",
@@ -1333,19 +1398,20 @@ source_set("v8_base") {
"src/mips/lithium-mips.h",
"src/mips/macro-assembler-mips.cc",
"src/mips/macro-assembler-mips.h",
- "src/mips/regexp-macro-assembler-mips.cc",
- "src/mips/regexp-macro-assembler-mips.h",
"src/mips/simulator-mips.cc",
"src/mips/simulator-mips.h",
"src/compiler/mips/code-generator-mips.cc",
"src/compiler/mips/instruction-codes-mips.h",
"src/compiler/mips/instruction-selector-mips.cc",
- "src/compiler/mips/linkage-mips.cc",
+ "src/debug/mips/debug-mips.cc",
+ "src/full-codegen/mips/full-codegen-mips.cc",
"src/ic/mips/access-compiler-mips.cc",
"src/ic/mips/handler-compiler-mips.cc",
"src/ic/mips/ic-mips.cc",
"src/ic/mips/ic-compiler-mips.cc",
"src/ic/mips/stub-cache-mips.cc",
+ "src/regexp/mips/regexp-macro-assembler-mips.cc",
+ "src/regexp/mips/regexp-macro-assembler-mips.h",
]
} else if (v8_target_arch == "mips64el") {
sources += [
@@ -1360,12 +1426,10 @@ source_set("v8_base") {
"src/mips64/constants-mips64.cc",
"src/mips64/constants-mips64.h",
"src/mips64/cpu-mips64.cc",
- "src/mips64/debug-mips64.cc",
"src/mips64/deoptimizer-mips64.cc",
"src/mips64/disasm-mips64.cc",
"src/mips64/frames-mips64.cc",
"src/mips64/frames-mips64.h",
- "src/mips64/full-codegen-mips64.cc",
"src/mips64/interface-descriptors-mips64.cc",
"src/mips64/lithium-codegen-mips64.cc",
"src/mips64/lithium-codegen-mips64.h",
@@ -1375,15 +1439,17 @@ source_set("v8_base") {
"src/mips64/lithium-mips64.h",
"src/mips64/macro-assembler-mips64.cc",
"src/mips64/macro-assembler-mips64.h",
- "src/mips64/regexp-macro-assembler-mips64.cc",
- "src/mips64/regexp-macro-assembler-mips64.h",
"src/mips64/simulator-mips64.cc",
"src/mips64/simulator-mips64.h",
+ "src/debug/mips64/debug-mips64.cc",
+ "src/full-codegen/mips64/full-codegen-mips64.cc",
"src/ic/mips64/access-compiler-mips64.cc",
"src/ic/mips64/handler-compiler-mips64.cc",
"src/ic/mips64/ic-mips64.cc",
"src/ic/mips64/ic-compiler-mips64.cc",
"src/ic/mips64/stub-cache-mips64.cc",
+ "src/regexp/mips64/regexp-macro-assembler-mips64.cc",
+ "src/regexp/mips64/regexp-macro-assembler-mips64.h",
]
}
@@ -1479,6 +1545,7 @@ source_set("v8_libbase") {
"src/base/safe_conversions_impl.h",
"src/base/safe_math.h",
"src/base/safe_math_impl.h",
+ "src/base/smart-pointers.h",
"src/base/sys-info.cc",
"src/base/sys-info.h",
"src/base/utils/random-number-generator.cc",
@@ -1700,7 +1767,7 @@ if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
"//build/config/sanitizers:deps",
]
- # TODO(jochen): Add support for readline and vtunejit.
+ # TODO(jochen): Add support for vtunejit.
if (is_posix) {
sources += [ "src/d8-posix.cc" ]
@@ -1710,8 +1777,6 @@ if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
if (!is_component_build) {
sources += [
- "src/d8-debug.cc",
- "src/d8-debug.h",
"$target_gen_dir/d8-js.cc",
]
}
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index e8ce915633..0dff96acc7 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,574 @@
+2015-08-19: Version 4.6.85
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-19: Version 4.6.84
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-19: Version 4.6.83
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-18: Version 4.6.82
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-18: Version 4.6.81
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-18: Version 4.6.80
+
+ Filter out slot buffer slots, that point to SMIs in dead objects
+ (Chromium issues 454297, 519577).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-17: Version 4.6.79
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-17: Version 4.6.78
+
+ Put V8 extras into the snapshot.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-15: Version 4.6.77
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-14: Version 4.6.76
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-14: Version 4.6.75
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-14: Version 4.6.74
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-13: Version 4.6.73
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-13: Version 4.6.72
+
+ Stage sloppy classes (issue 3305).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-13: Version 4.6.71
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-12: Version 4.6.70
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-12: Version 4.6.69
+
+ Stage --harmony-array-includes (issue 3575).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-12: Version 4.6.68
+
+ Use a new lexical context for sloppy-mode eval (issue 4288).
+
+ Add includes method to typed arrays (issue 3575).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-11: Version 4.6.67
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-11: Version 4.6.66
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-11: Version 4.6.65
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-10: Version 4.6.64
+
+ Disable --global-var-shortcuts (Chromium issue 517778).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-10: Version 4.6.63
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-09: Version 4.6.62
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-08: Version 4.6.61
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-08: Version 4.6.60
+
+ [IC] Make SeededNumberDictionary::UpdateMaxNumberKey prototype aware
+ (issue 4335).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-08: Version 4.6.59
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-07: Version 4.6.58
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-07: Version 4.6.57
+
+ Rename "extras exports" to "extras binding" (Chromium issue 507133).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-07: Version 4.6.56
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-06: Version 4.6.55
+
+ Fix off-by-one in Array.concat's max index check (Chromium issue
+ 516592).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-06: Version 4.6.54
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-06: Version 4.6.53
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-05: Version 4.6.52
+
+ Ship --harmony-new-target (issue 3887).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-04: Version 4.6.51
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-04: Version 4.6.50
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-03: Version 4.6.49
+
+ SIMD.js Add the other SIMD Phase 1 types (issue 4124).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-03: Version 4.6.48
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-03: Version 4.6.47
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-01: Version 4.6.46
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-01: Version 4.6.45
+
+ Performance and stability improvements on all platforms.
+
+
+2015-08-01: Version 4.6.44
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-31: Version 4.6.43
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-31: Version 4.6.42
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-31: Version 4.6.41
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-30: Version 4.6.40
+
+ Pass the kGCCallbackFlagForced flag when invoking
+ Heap::CollectAllGarbage from AdjustAmountOfExternalAllocatedMemory
+ (Chromium issue 511294).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-30: Version 4.6.39
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-30: Version 4.6.38
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-29: Version 4.6.37
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-28: Version 4.6.36
+
+ Fix prototype registration upon SlowToFast migration (Chromium issue
+ 513602).
+
+ Bugfix: Incorrect type feedback vector structure on recompile (Chromium
+ issue 514526).
+
+ Reland of "Remove ExternalArray, derived types, and element kinds"
+ (issue 3996).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-28: Version 4.6.35
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-28: Version 4.6.34
+
+ Remove ExternalArray, derived types, and element kinds (issue 3996).
+
+ Make V8 compile with MSVS 2015 (issue 4326).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-27: Version 4.6.33
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-26: Version 4.6.32
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-25: Version 4.6.31
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-25: Version 4.6.30
+
+ Make dates default to the local timezone if none specified (issue 4242,
+ Chromium issue 391730).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-24: Version 4.6.29
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-24: Version 4.6.28
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-23: Version 4.6.27
+
+ Fix check for a date with a 24th hour (Chromium issue 174609).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-23: Version 4.6.26
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-22: Version 4.6.25
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-22: Version 4.6.24
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-22: Version 4.6.23
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-21: Version 4.6.22
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-21: Version 4.6.21
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-21: Version 4.6.20
+
+ Don't run the second pass of the pending phantom callbacks if the heap
+ has been torn down (Chromium issue 511204).
+
+ Debugger: prepare code for debugging on a per-function basis (issue
+ 4132).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-20: Version 4.6.19
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-20: Version 4.6.18
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-19: Version 4.6.17
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-18: Version 4.6.16
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-18: Version 4.6.15
+
+ Make NumberFormat use the ICU currency data, fix bug in NumberFormat
+ (Chromium issues 304722, 435465, 473104).
+
+ Properly fix enumerate / Object.keys wrt access checked objects
+ (Chromium issue 509936).
+
+ Fix object enumeration wrt access checked objects (Chromium issue
+ 509936).
+
+ Fix DefineOwnProperty for data properties wrt failed access checks
+ (Chromium issue 509936).
+
+ Fix GetOwnPropertyNames on access-checked objects (Chromium issue
+ 509936).
+
+ Fix getPrototypeOf for access checked objects (Chromium issue 509936).
+
+ Delete APIs deprecated since last release.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-17: Version 4.6.14
+
+ Array.prototype.reverse should call [[HasProperty]] on elements before
+ [[Get]] (issue 4223).
+
+ In RegExp, lastIndex is read with ToLength, not ToInteger (issue 4244).
+
+ Stage --harmony-new-target (issue 3887).
+
+ Re-ship harmony spread calls and spread arrays (issue 3018).
+
+ Expose SIMD.Float32x4 type to Javascript. This CL exposes the
+ constructor function, defines type related information, and implements
+ value type semantics. It also refactors test/mjsunit/samevalue.js to
+ test SameValue and SameValueZero (issue 4124).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-17: Version 4.6.13
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-16: Version 4.6.12
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-16: Version 4.6.11
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-16: Version 4.6.10
+
+ Expose SIMD.Float32x4 type to Javascript. This CL exposes the
+ constructor function, defines type related information, and implements
+ value type semantics. It also refactors test/mjsunit/samevalue.js to
+ test SameValue and SameValueZero (issue 4124).
+
+ Fix runtime-atomics for Win 10 SDK and remove volatile (Chromium issues
+ 440500, 491424).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-15: Version 4.6.9
+
+ Let the second pass phantom callbacks run in a separate task on the
+ foreground thread.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-15: Version 4.6.8
+
+ Optimize String.prototype.includes (issue 3807).
+
+ Unship spread calls and spread arrays (issue 4298).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-15: Version 4.6.7
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-14: Version 4.6.6
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-14: Version 4.6.5
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-14: Version 4.6.4
+
+ MIPS64: Fix BlockTrampolinePoolFor() to emit trampoline before blocking,
+ if needed (issue 4294).
+
+ Add convenience method for converting v8::PersistentBase to v8::Local.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-13: Version 4.6.3
+
+ MIPS: Fix BlockTrampolinePoolFor() to emit trampoline before blocking,
+ if needed (issue 4294).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-13: Version 4.6.2
+
+ [arm] CheckConstPool between TurboFan instructions (issue 4292).
+
+ Fix keyed access of primitive objects in the runtime. For now it uses a
+ pretty slow path for accessing strings by wrapping it into a new
+ temporary wrapper (issues 3088, 4042).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-12: Version 4.6.1
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-09: Version 4.5.107
+
+ [arm] Don't call branch_offset within CheckConstPool (issue 4292).
+
+ [arm] Fix missing CheckBuffer for branches (issue 4292).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-09: Version 4.5.106
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-09: Version 4.5.105
+
+ Guard @@isConcatSpreadable behind a flag (Chromium issue 507553).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-07-08: Version 4.5.104
+
+ [x64] Fix handling of Smi constants in LSubI and LBitI (Chromium issue
+ 478612).
+
+ Performance and stability improvements on all platforms.
+
+
2015-07-08: Version 4.5.103
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 689ae778bb..c8c7de080c 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -8,23 +8,23 @@ vars = {
deps = {
"v8/build/gyp":
- Var("git_url") + "/external/gyp.git" + "@" + "5122240c5e5c4d8da12c543d82b03d6089eb77c5",
+ Var("git_url") + "/external/gyp.git" + "@" + "6ee91ad8659871916f9aa840d42e1513befdf638",
"v8/third_party/icu":
- Var("git_url") + "/chromium/deps/icu.git" + "@" + "c81a1a3989c3b66fa323e9a6ee7418d7c08297af",
+ Var("git_url") + "/chromium/deps/icu.git" + "@" + "89dcdec16381883782b9cc9cff38e00f047a0f46",
"v8/buildtools":
- Var("git_url") + "/chromium/buildtools.git" + "@" + "ecc8e253abac3b6186a97573871a084f4c0ca3ae",
+ Var("git_url") + "/chromium/buildtools.git" + "@" + "565d04e8741429fb1b4f26d102f2c6c3b849edeb",
"v8/testing/gtest":
- Var("git_url") + "/external/googletest.git" + "@" + "23574bf2333f834ff665f894c97bef8a5b33a0a9",
+ Var("git_url") + "/external/googletest.git" + "@" + "9855a87157778d39b95eccfb201a9dc90f6d61c6",
"v8/testing/gmock":
- Var("git_url") + "/external/googlemock.git" + "@" + "29763965ab52f24565299976b936d1265cb6a271", # from svn revision 501
+ Var("git_url") + "/external/googlemock.git" + "@" + "0421b6f358139f02e102c9c332ce19a33faf75be",
"v8/tools/clang":
- Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "73ec8804ed395b0886d6edf82a9f33583f4a7902",
+ Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "5b12e334ec0e571a8e1f68d028dc5427b58c17ec",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
- Var("git_url") + "/android_tools.git" + "@" + "21f4bcbd6cd927e4b4227cfde7d5f13486be1236",
+ Var("git_url") + "/android_tools.git" + "@" + "9e9b6169a098bc19986e44fbbf65e4c29031e4bd",
},
"win": {
"v8/third_party/cygwin":
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 97612655a7..baa3b52ca6 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -44,10 +44,6 @@ endif
ifdef component
GYPFLAGS += -Dcomponent=$(component)
endif
-# console=readline
-ifdef console
- GYPFLAGS += -Dconsole=$(console)
-endif
# disassembler=on
ifeq ($(disassembler), on)
GYPFLAGS += -Dv8_enable_disassembler=1
@@ -162,7 +158,9 @@ endif
ifdef embedscript
GYPFLAGS += -Dembed_script=$(embedscript)
endif
-
+ifeq ($(goma), on)
+ GYPFLAGS += -Duse_goma=1
+endif
# arm specific flags.
# arm_version=<number | "default">
ifneq ($(strip $(arm_version)),)
@@ -218,6 +216,12 @@ ifeq ($(arm_test_noprobe), on)
GYPFLAGS += -Darm_test_noprobe=on
endif
+# Optionally enable wasm prototype.
+# Assume you've placed a link to v8-native-prototype in third_party/wasm.
+ifeq ($(wasm), on)
+ GYPFLAGS += -Dv8_wasm=1
+endif
+
# ----------------- available targets: --------------------
# - "grokdump": rebuilds heap constants lists used by grokdump
# - any arch listed in ARCHES (see below)
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index d6db77ffe0..2c5caeb125 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -1,21 +1,21 @@
adamk@chromium.org
-arv@chromium.org
bmeurer@chromium.org
danno@chromium.org
-dcarney@chromium.org
-dslomov@chromium.org
+epertoso@chromium.org
+hablich@chromium.org
hpayer@chromium.org
ishell@chromium.org
jarin@chromium.org
jkummerow@chromium.org
jochen@chromium.org
+littledan@chromium.org
machenbach@chromium.org
+mlippautz@chromium.org
marja@chromium.org
mstarzinger@chromium.org
mvstanton@chromium.org
rmcilroy@chromium.org
rossberg@chromium.org
-svenpanne@chromium.org
titzer@chromium.org
ulan@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 34e09b8ec5..1bcd9922c5 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -141,6 +141,39 @@ def _CheckUnwantedDependencies(input_api, output_api):
return results
+def _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api):
+ """Attempts to prevent inclusion of inline headers into normal header
+ files. This tries to establish a layering where inline headers can be
+ included by other inline headers or compilation units only."""
+ file_inclusion_pattern = r'(?!.+-inl\.h).+\.h'
+ include_directive_pattern = input_api.re.compile(r'#include ".+-inl.h"')
+ include_warning = (
+ 'You might be including an inline header (e.g. foo-inl.h) within a\n'
+ 'normal header (e.g. bar.h) file. Can you avoid introducing the\n'
+ '#include? The commit queue will not block on this warning.')
+
+ def FilterFile(affected_file):
+ black_list = (_EXCLUDED_PATHS +
+ input_api.DEFAULT_BLACK_LIST)
+ return input_api.FilterSourceFile(
+ affected_file,
+ white_list=(file_inclusion_pattern, ),
+ black_list=black_list)
+
+ problems = []
+ for f in input_api.AffectedSourceFiles(FilterFile):
+ local_path = f.LocalPath()
+ for line_number, line in f.ChangedContents():
+ if (include_directive_pattern.search(line)):
+ problems.append(
+ '%s:%d\n %s' % (local_path, line_number, line.strip()))
+
+ if problems:
+ return [output_api.PresubmitPromptOrNotify(include_warning, problems)]
+ else:
+ return []
+
+
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
"""Attempts to prevent use of functions intended only for testing in
non-testing code. For now this is just a best-effort implementation
@@ -195,6 +228,8 @@ def _CommonChecks(input_api, output_api):
results.extend(_CheckUnwantedDependencies(input_api, output_api))
results.extend(
_CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
+ results.extend(
+ _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api))
return results
@@ -209,28 +244,32 @@ def _SkipTreeCheck(input_api, output_api):
return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
-def _CheckChangeLogFlag(input_api, output_api):
+def _CheckChangeLogFlag(input_api, output_api, warn):
"""Checks usage of LOG= flag in the commit message."""
results = []
- if input_api.change.BUG and not 'LOG' in input_api.change.tags:
- results.append(output_api.PresubmitError(
- 'An issue reference (BUG=) requires a change log flag (LOG=). '
- 'Use LOG=Y for including this commit message in the change log. '
- 'Use LOG=N or leave blank otherwise.'))
+ if (input_api.change.BUG and input_api.change.BUG != 'none' and
+ not 'LOG' in input_api.change.tags):
+ text = ('An issue reference (BUG=) requires a change log flag (LOG=). '
+ 'Use LOG=Y for including this commit message in the change log. '
+ 'Use LOG=N or leave blank otherwise.')
+ if warn:
+ results.append(output_api.PresubmitPromptWarning(text))
+ else:
+ results.append(output_api.PresubmitError(text))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
- results.extend(_CheckChangeLogFlag(input_api, output_api))
+ results.extend(_CheckChangeLogFlag(input_api, output_api, True))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
- results.extend(_CheckChangeLogFlag(input_api, output_api))
+ results.extend(_CheckChangeLogFlag(input_api, output_api, False))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
if not _SkipTreeCheck(input_api, output_api):
diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS
index 64771bc27d..f57dfa1402 100644
--- a/deps/v8/WATCHLISTS
+++ b/deps/v8/WATCHLISTS
@@ -40,7 +40,7 @@
'filepath': 'src/snapshot/',
},
'debugger': {
- 'filepath': 'src/debug\.(cc|h)|src/.*-debugger\.js|src/runtime/runtime-debug\.cc',
+ 'filepath': 'src/debug/',
},
},
diff --git a/deps/v8/build/download_gold_plugin.py b/deps/v8/build/download_gold_plugin.py
new file mode 100755
index 0000000000..7a0c21b8d7
--- /dev/null
+++ b/deps/v8/build/download_gold_plugin.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Script to download LLVM gold plugin from google storage."""
+
+import json
+import os
+import shutil
+import subprocess
+import sys
+import zipfile
+
+SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
+CHROME_SRC = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
+sys.path.insert(0, os.path.join(CHROME_SRC, 'tools'))
+
+import find_depot_tools
+
+DEPOT_PATH = find_depot_tools.add_depot_tools_to_path()
+GSUTIL_PATH = os.path.join(DEPOT_PATH, 'gsutil.py')
+
+LLVM_BUILD_PATH = os.path.join(CHROME_SRC, 'third_party', 'llvm-build',
+ 'Release+Asserts')
+CLANG_UPDATE_PY = os.path.join(CHROME_SRC, 'tools', 'clang', 'scripts',
+ 'update.py')
+CLANG_REVISION = os.popen(CLANG_UPDATE_PY + ' --print-revision').read().rstrip()
+
+CLANG_BUCKET = 'gs://chromium-browser-clang/Linux_x64'
+
+def main():
+ targz_name = 'llvmgold-%s.tgz' % CLANG_REVISION
+ remote_path = '%s/%s' % (CLANG_BUCKET, targz_name)
+
+ os.chdir(LLVM_BUILD_PATH)
+
+ # TODO(pcc): Fix gsutil.py cp url file < /dev/null 2>&0
+ # (currently aborts with exit code 1,
+ # https://github.com/GoogleCloudPlatform/gsutil/issues/289) or change the
+ # stdin->stderr redirect in update.py to do something else (crbug.com/494442).
+ subprocess.check_call(['python', GSUTIL_PATH,
+ 'cp', remote_path, targz_name],
+ stderr=open('/dev/null', 'w'))
+ subprocess.check_call(['tar', 'xzf', targz_name])
+ os.remove(targz_name)
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/deps/v8/build/features.gypi b/deps/v8/build/features.gypi
index 59d15134dd..21e55740ae 100644
--- a/deps/v8/build/features.gypi
+++ b/deps/v8/build/features.gypi
@@ -64,6 +64,9 @@
# Set to 1 to enable DCHECKs in release builds.
'dcheck_always_on%': 0,
+
+ # Set to 1 to enable building with wasm prototype.
+ 'v8_wasm%': 0,
},
'target_defaults': {
'conditions': [
@@ -103,6 +106,9 @@
['dcheck_always_on!=0', {
'defines': ['DEBUG',],
}],
+ ['v8_wasm!=0', {
+ 'defines': ['V8_WASM',],
+ }],
], # conditions
'configurations': {
'DebugBaseCommon': {
diff --git a/deps/v8/build/get_landmines.py b/deps/v8/build/get_landmines.py
index 298010f824..434b980c6d 100755
--- a/deps/v8/build/get_landmines.py
+++ b/deps/v8/build/get_landmines.py
@@ -23,6 +23,7 @@ def main():
print 'Clobber after ICU roll.'
print 'Moar clobbering...'
print 'Remove build/android.gypi'
+ print 'Cleanup after windows ninja switch attempt.'
return 0
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index 2cd0b51d43..bf06bfa20f 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -88,6 +88,13 @@
'clang_dir%': '<(base_dir)/third_party/llvm-build/Release+Asserts',
+ 'use_lto%': 0,
+
+ # Control Flow Integrity for virtual calls and casts.
+ # See http://clang.llvm.org/docs/ControlFlowIntegrity.html
+ 'cfi_vptr%': 0,
+ 'cfi_diag%': 0,
+
# goma settings.
# 1 to use goma.
# If no gomadir is set, it uses the default gomadir.
@@ -105,6 +112,16 @@
}, {
'host_clang%': '0',
}],
+ # linux_use_bundled_gold: whether to use the gold linker binary checked
+ # into third_party/binutils. Force this off via GYP_DEFINES when you
+ # are using a custom toolchain and need to control -B in ldflags.
+ # Do not use 32-bit gold on 32-bit hosts as it runs out address space
+ # for component=static_library builds.
+ ['(OS=="linux" or OS=="android") and (target_arch=="x64" or target_arch=="arm" or (target_arch=="ia32" and host_arch=="x64"))', {
+ 'linux_use_bundled_gold%': 1,
+ }, {
+ 'linux_use_bundled_gold%': 0,
+ }],
],
},
'base_dir%': '<(base_dir)',
@@ -122,6 +139,10 @@
'tsan%': '<(tsan)',
'sanitizer_coverage%': '<(sanitizer_coverage)',
'use_custom_libcxx%': '<(use_custom_libcxx)',
+ 'linux_use_bundled_gold%': '<(linux_use_bundled_gold)',
+ 'use_lto%': '<(use_lto)',
+ 'cfi_vptr%': '<(cfi_vptr)',
+ 'cfi_diag%': '<(cfi_diag)',
# Add a simple extra solely for the purpose of the cctests
'v8_extra_library_files': ['../test/cctest/test-extra.js'],
@@ -148,7 +169,7 @@
# the JS builtins sources and the start snapshot.
# Embedders that don't use standalone.gypi will need to add
# their own default value.
- 'v8_use_external_startup_data%': 0,
+ 'v8_use_external_startup_data%': 1,
# Relative path to icu.gyp from this file.
'icu_gyp_path': '../third_party/icu/icu.gyp',
@@ -179,8 +200,8 @@
}],
],
}],
- ['(v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \
- (OS=="linux" or OS=="mac")', {
+ ['((v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \
+ (OS=="linux" or OS=="mac")) or (v8_target_arch=="ppc64" and OS=="linux")', {
'v8_enable_gdbjit%': 1,
}, {
'v8_enable_gdbjit%': 0,
@@ -207,10 +228,8 @@
# the C++ standard library is used.
'use_custom_libcxx%': 1,
}],
- ['OS=="linux"', {
- # Gradually roll out v8_use_external_startup_data.
- # Should eventually be default enabled on all platforms.
- 'v8_use_external_startup_data%': 1,
+ ['cfi_vptr==1', {
+ 'use_lto%': 1,
}],
['OS=="android"', {
# Location of Android NDK.
@@ -358,6 +377,19 @@
'Release': {
'cflags+': ['<@(release_extra_cflags)'],
},
+ 'conditions': [
+ ['OS=="win"', {
+ 'Optdebug_x64': {
+ 'inherit_from': ['Optdebug'],
+ },
+ 'Debug_x64': {
+ 'inherit_from': ['Debug'],
+ },
+ 'Release_x64': {
+ 'inherit_from': ['Release'],
+ },
+ }],
+ ],
},
'conditions':[
['(clang==1 or host_clang==1) and OS!="win"', {
@@ -522,6 +554,21 @@
}],
],
}],
+ ['linux_use_bundled_gold==1 and not (clang==0 and use_lto==1)', {
+ # Put our binutils, which contains gold in the search path. We pass
+ # the path to gold to the compiler. gyp leaves unspecified what the
+ # cwd is when running the compiler, so the normal gyp path-munging
+ # fails us. This hack gets the right path.
+ #
+ # Disabled when using GCC LTO because GCC also uses the -B search
+ # path at link time to find "as", and our bundled "as" can only
+ # target x86.
+ 'ldflags': [
+ # Note, Chromium allows ia32 host arch as well, we limit this to
+ # x64 in v8.
+ '-B<(base_dir)/third_party/binutils/Linux_x64/Release/bin',
+ ],
+ }],
],
},
}],
@@ -658,7 +705,85 @@
}],
],
'msvs_cygwin_dirs': ['<(DEPTH)/third_party/cygwin'],
- 'msvs_disabled_warnings': [4355, 4800],
+ 'msvs_disabled_warnings': [
+ # C4091: 'typedef ': ignored on left of 'X' when no variable is
+ # declared.
+ # This happens in a number of Windows headers. Dumb.
+ 4091,
+
+ # C4127: conditional expression is constant
+ # This warning can in theory catch dead code and other problems, but
+ # triggers in far too many desirable cases where the conditional
+ # expression is either set by macros or corresponds some legitimate
+ # compile-time constant expression (due to constant template args,
+ # conditionals comparing the sizes of different types, etc.). Some of
+ # these can be worked around, but it's not worth it.
+ 4127,
+
+ # C4351: new behavior: elements of array 'array' will be default
+ # initialized
+ # This is a silly "warning" that basically just alerts you that the
+ # compiler is going to actually follow the language spec like it's
+ # supposed to, instead of not following it like old buggy versions
+ # did. There's absolutely no reason to turn this on.
+ 4351,
+
+ # C4355: 'this': used in base member initializer list
+ # It's commonly useful to pass |this| to objects in a class'
+ # initializer list. While this warning can catch real bugs, most of
+ # the time the constructors in question don't attempt to call methods
+ # on the passed-in pointer (until later), and annotating every legit
+ # usage of this is simply more hassle than the warning is worth.
+ 4355,
+
+ # C4503: 'identifier': decorated name length exceeded, name was
+ # truncated
+ # This only means that some long error messages might have truncated
+ # identifiers in the presence of lots of templates. It has no effect
+ # on program correctness and there's no real reason to waste time
+ # trying to prevent it.
+ 4503,
+
+ # Warning C4589 says: "Constructor of abstract class ignores
+ # initializer for virtual base class." Disable this warning because it
+ # is flaky in VS 2015 RTM. It triggers on compiler generated
+ # copy-constructors in some cases.
+ 4589,
+
+ # C4611: interaction between 'function' and C++ object destruction is
+ # non-portable
+ # This warning is unavoidable when using e.g. setjmp/longjmp. MSDN
+ # suggests using exceptions instead of setjmp/longjmp for C++, but
+ # Chromium code compiles without exception support. We therefore have
+ # to use setjmp/longjmp for e.g. JPEG decode error handling, which
+ # means we have to turn off this warning (and be careful about how
+ # object destruction happens in such cases).
+ 4611,
+
+ # TODO(jochen): These warnings are level 4. They will be slowly
+ # removed as code is fixed.
+ 4100, # Unreferenced formal parameter
+ 4121, # Alignment of a member was sensitive to packing
+ 4244, # Conversion from 'type1' to 'type2', possible loss of data
+ 4302, # Truncation from 'type 1' to 'type 2'
+ 4309, # Truncation of constant value
+ 4311, # Pointer truncation from 'type' to 'type'
+ 4312, # Conversion from 'type1' to 'type2' of greater size
+ 4481, # Nonstandard extension used: override specifier 'keyword'
+ 4505, # Unreferenced local function has been removed
+ 4510, # Default constructor could not be generated
+ 4512, # Assignment operator could not be generated
+ 4610, # Object can never be instantiated
+ 4800, # Forcing value to bool.
+ 4838, # Narrowing conversion. Doesn't seem to be very useful.
+ 4995, # 'X': name was marked as #pragma deprecated
+ 4996, # 'X': was declared deprecated (for GetVersionEx).
+
+ # These are variable shadowing warnings that are new in VS2015. We
+ # should work through these at some point -- they may be removed from
+ # the RTM release in the /W4 set.
+ 4456, 4457, 4458, 4459,
+ ],
'msvs_settings': {
'VCCLCompilerTool': {
'MinimalRebuild': 'false',
@@ -774,6 +899,12 @@
'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++0x', # -std=gnu++0x
},
+ 'conditions': [
+ ['v8_target_arch=="x64" or v8_target_arch=="arm64" \
+ or v8_target_arch=="mips64el"', {
+ 'xcode_settings': {'WARNING_CFLAGS': ['-Wshorten-64-to-32']},
+ }],
+ ],
}],
],
'target_conditions': [
@@ -1047,5 +1178,100 @@
['CXX.host_wrapper', '<(gomadir)/gomacc'],
],
}],
+ ['use_lto==1', {
+ 'target_defaults': {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'cflags': [
+ '-flto',
+ ],
+ }],
+ ],
+ },
+ }],
+ ['use_lto==1 and clang==0', {
+ 'target_defaults': {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'cflags': [
+ '-ffat-lto-objects',
+ ],
+ }],
+ ],
+ },
+ }],
+ ['use_lto==1 and clang==1', {
+ 'target_defaults': {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'arflags': [
+ '--plugin', '<(clang_dir)/lib/LLVMgold.so',
+ ],
+ # Apply a lower optimization level with lto. Chromium does this
+ # for non-official builds only - a differentiation that doesn't
+ # exist in v8.
+ 'ldflags': [
+ '-Wl,--plugin-opt,O1',
+ ],
+ }],
+ ],
+ },
+ }],
+ ['use_lto==1 and clang==0', {
+ 'target_defaults': {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'ldflags': [
+ '-flto=32',
+ ],
+ }],
+ ],
+ },
+ }],
+ ['use_lto==1 and clang==1', {
+ 'target_defaults': {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'ldflags': [
+ '-flto',
+ ],
+ }],
+ ],
+ },
+ }],
+ ['cfi_diag==1', {
+ 'target_defaults': {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'cflags': [
+ '-fno-sanitize-trap=cfi',
+ '-fsanitize-recover=cfi',
+ ],
+ 'ldflags': [
+ '-fno-sanitize-trap=cfi',
+ '-fsanitize-recover=cfi',
+ ],
+ }],
+ ],
+ },
+ }],
+ ['cfi_vptr==1', {
+ 'target_defaults': {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'cflags': [
+ '-fsanitize=cfi-vcall',
+ '-fsanitize=cfi-derived-cast',
+ '-fsanitize=cfi-unrelated-cast',
+ ],
+ 'ldflags': [
+ '-fsanitize=cfi-vcall',
+ '-fsanitize=cfi-derived-cast',
+ '-fsanitize=cfi-unrelated-cast',
+ ],
+ }],
+ ],
+ },
+ }],
],
}
diff --git a/deps/v8/build/toolchain.gypi b/deps/v8/build/toolchain.gypi
index 4dbf42bfe3..a8a3b56ec2 100644
--- a/deps/v8/build/toolchain.gypi
+++ b/deps/v8/build/toolchain.gypi
@@ -1149,7 +1149,9 @@
}],
],
}],
- ['linux_use_gold_flags==1', {
+ # TODO(pcc): Re-enable in LTO builds once we've fixed the intermittent
+ # link failures (crbug.com/513074).
+ ['linux_use_gold_flags==1 and use_lto==0', {
'target_conditions': [
['_toolset=="target"', {
'ldflags': [
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index be9e5c0c6b..c6cba0f982 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -19,6 +19,20 @@ class Task {
virtual void Run() = 0;
};
+
+/**
+* An IdleTask represents a unit of work to be performed in idle time.
+* The Run method is invoked with an argument that specifies the deadline in
+* seconds returned by MonotonicallyIncreasingTime().
+* The idle task is expected to complete by this deadline.
+*/
+class IdleTask {
+ public:
+ virtual ~IdleTask() {}
+ virtual void Run(double deadline_in_seconds) = 0;
+};
+
+
/**
* V8 Platform abstraction layer.
*
@@ -63,8 +77,26 @@ class Platform {
* scheduling. The definition of "foreground" is opaque to V8.
*/
virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
- double delay_in_seconds) {
+ double delay_in_seconds) = 0;
+
+ /**
+ * Schedules a task to be invoked on a foreground thread wrt a specific
+ * |isolate| when the embedder is idle.
+ * Requires that SupportsIdleTasks(isolate) is true.
+ * Idle tasks may be reordered relative to other task types and may be
+ * starved for an arbitrarily long time if no idle time is available.
+ * The definition of "foreground" is opaque to V8.
+ */
+ virtual void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) {
+ // TODO(ulan): Make this function abstract after V8 roll in Chromium.
+ }
+
+ /**
+ * Returns true if idle tasks are enabled for the given |isolate|.
+ */
+ virtual bool IdleTasksEnabled(Isolate* isolate) {
// TODO(ulan): Make this function abstract after V8 roll in Chromium.
+ return false;
}
/**
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index 6454a19b7e..c996c9997d 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -133,6 +133,8 @@ class DefaultGlobalMapTraits : public StdMapTraits<K, V> {
return K();
}
static void DisposeCallbackData(WeakCallbackDataType* data) {}
+ static void OnWeakCallback(
+ const WeakCallbackInfo<WeakCallbackDataType>& data) {}
static void Dispose(Isolate* isolate, Global<V> value, K key) {}
// This is a second pass callback, so SetSecondPassCallback cannot be called.
static void DisposeWeak(const WeakCallbackInfo<WeakCallbackDataType>& data) {}
@@ -452,7 +454,7 @@ class GlobalValueMap : public PersistentValueMapBase<K, V, Traits> {
: WeakCallbackType::kParameter;
Local<V> value(Local<V>::New(this->isolate(), *persistent));
persistent->template SetWeak<typename Traits::WeakCallbackDataType>(
- Traits::WeakCallbackParameter(this, key, value), FirstWeakCallback,
+ Traits::WeakCallbackParameter(this, key, value), OnWeakCallback,
callback_type);
}
PersistentContainerValue old_value =
@@ -471,12 +473,13 @@ class GlobalValueMap : public PersistentValueMapBase<K, V, Traits> {
}
private:
- static void FirstWeakCallback(
+ static void OnWeakCallback(
const WeakCallbackInfo<typename Traits::WeakCallbackDataType>& data) {
if (Traits::kCallbackType != kNotWeak) {
auto map = Traits::MapFromWeakCallbackInfo(data);
K key = Traits::KeyFromWeakCallbackInfo(data);
map->RemoveWeak(key);
+ Traits::OnWeakCallback(data);
data.SetSecondPassCallback(SecondWeakCallback);
}
}
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 98dca238c8..85a2ced31e 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 4
-#define V8_MINOR_VERSION 5
-#define V8_BUILD_NUMBER 103
-#define V8_PATCH_LEVEL 35
+#define V8_MINOR_VERSION 6
+#define V8_BUILD_NUMBER 85
+#define V8_PATCH_LEVEL 23
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 062dd5f21f..9a577765b4 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -509,6 +509,10 @@ template <class T> class PersistentBase {
V8_INLINE bool IsEmpty() const { return val_ == NULL; }
V8_INLINE void Empty() { val_ = 0; }
+ V8_INLINE Local<T> Get(Isolate* isolate) const {
+ return Local<T>::New(isolate, *this);
+ }
+
template <class S>
V8_INLINE bool operator==(const PersistentBase<S>& that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
@@ -634,8 +638,8 @@ template <class T> class PersistentBase {
friend class Object;
explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
- PersistentBase(PersistentBase& other) = delete; // NOLINT
- void operator=(PersistentBase&) = delete;
+ PersistentBase(const PersistentBase& other) = delete; // NOLINT
+ void operator=(const PersistentBase&) = delete;
V8_INLINE static T* New(Isolate* isolate, T* that);
T* val_;
@@ -841,8 +845,8 @@ class Global : public PersistentBase<T> {
private:
template <class F>
friend class ReturnValue;
- Global(Global&) = delete;
- void operator=(Global&) = delete;
+ Global(const Global&) = delete;
+ void operator=(const Global&) = delete;
V8_INLINE T* operator*() const { return this->val_; }
};
@@ -1110,11 +1114,6 @@ class V8_EXPORT Script {
* Returns the corresponding context-unbound script.
*/
Local<UnboundScript> GetUnboundScript();
-
- V8_DEPRECATED("Use GetUnboundScript()->GetId()",
- int GetId()) {
- return GetUnboundScript()->GetId();
- }
};
@@ -1386,15 +1385,13 @@ class V8_EXPORT ScriptCompiler {
/**
* Compile an ES6 module.
*
- * This is an experimental feature.
+ * This is an unfinished experimental feature, and is only exposed
+ * here for internal testing purposes.
+ * Only parsing works at the moment. Do not use.
*
* TODO(adamk): Script is likely the wrong return value for this;
* should return some new Module type.
*/
- static V8_DEPRECATE_SOON(
- "Use maybe version",
- Local<Script> CompileModule(Isolate* isolate, Source* source,
- CompileOptions options = kNoCompileOptions));
static V8_WARN_UNUSED_RESULT MaybeLocal<Script> CompileModule(
Local<Context> context, Source* source,
CompileOptions options = kNoCompileOptions);
@@ -3004,8 +3001,9 @@ class V8_EXPORT Map : public Object {
* in the same manner as the array returned from AsArray().
* Guaranteed to be side-effect free if the array contains no holes.
*/
- static V8_WARN_UNUSED_RESULT MaybeLocal<Map> FromArray(Local<Context> context,
- Local<Array> array);
+ static V8_WARN_UNUSED_RESULT V8_DEPRECATED(
+ "Use mutation methods instead",
+ MaybeLocal<Map> FromArray(Local<Context> context, Local<Array> array));
V8_INLINE static Map* Cast(Value* obj);
@@ -3043,8 +3041,9 @@ class V8_EXPORT Set : public Object {
* Creates a new Set containing the items in array.
* Guaranteed to be side-effect free if the array contains no holes.
*/
- static V8_WARN_UNUSED_RESULT MaybeLocal<Set> FromArray(Local<Context> context,
- Local<Array> array);
+ static V8_WARN_UNUSED_RESULT V8_DEPRECATED(
+ "Use mutation methods instead",
+ MaybeLocal<Set> FromArray(Local<Context> context, Local<Array> array));
V8_INLINE static Set* Cast(Value* obj);
@@ -4804,12 +4803,6 @@ class V8_EXPORT ResourceConstraints {
void ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit);
- // Deprecated, will be removed soon.
- V8_DEPRECATED("Use two-args version instead",
- void ConfigureDefaults(uint64_t physical_memory,
- uint64_t virtual_memory_limit,
- uint32_t number_of_processors));
-
int max_semi_space_size() const { return max_semi_space_size_; }
void set_max_semi_space_size(int value) { max_semi_space_size_ = value; }
int max_old_space_size() const { return max_old_space_size_; }
@@ -4819,14 +4812,6 @@ class V8_EXPORT ResourceConstraints {
uint32_t* stack_limit() const { return stack_limit_; }
// Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
- V8_DEPRECATED("Unused, will be removed", int max_available_threads() const) {
- return max_available_threads_;
- }
- // Set the number of threads available to V8, assuming at least 1.
- V8_DEPRECATED("Unused, will be removed",
- void set_max_available_threads(int value)) {
- max_available_threads_ = value;
- }
size_t code_range_size() const { return code_range_size_; }
void set_code_range_size(size_t value) {
code_range_size_ = value;
@@ -4837,7 +4822,6 @@ class V8_EXPORT ResourceConstraints {
int max_old_space_size_;
int max_executable_size_;
uint32_t* stack_limit_;
- int max_available_threads_;
size_t code_range_size_;
};
@@ -4967,27 +4951,35 @@ typedef bool (*AllowCodeGenerationFromStringsCallback)(Local<Context> context);
// --- Garbage Collection Callbacks ---
/**
- * Applications can register callback functions which will be called
- * before and after a garbage collection. Allocations are not
- * allowed in the callback functions, you therefore cannot manipulate
- * objects (set or delete properties for example) since it is possible
- * such operations will result in the allocation of objects.
+ * Applications can register callback functions which will be called before and
+ * after certain garbage collection operations. Allocations are not allowed in
+ * the callback functions, you therefore cannot manipulate objects (set or
+ * delete properties for example) since it is possible such operations will
+ * result in the allocation of objects.
*/
enum GCType {
kGCTypeScavenge = 1 << 0,
kGCTypeMarkSweepCompact = 1 << 1,
- kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact
+ kGCTypeIncrementalMarking = 1 << 2,
+ kGCTypeProcessWeakCallbacks = 1 << 3,
+ kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact |
+ kGCTypeIncrementalMarking | kGCTypeProcessWeakCallbacks
};
enum GCCallbackFlags {
kNoGCCallbackFlags = 0,
- kGCCallbackFlagCompacted = 1 << 0,
kGCCallbackFlagConstructRetainedObjectInfos = 1 << 1,
- kGCCallbackFlagForced = 1 << 2
+ kGCCallbackFlagForced = 1 << 2,
+ kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3
};
-typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags);
-typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags);
+V8_DEPRECATE_SOON("Use GCCallBack instead",
+ typedef void (*GCPrologueCallback)(GCType type,
+ GCCallbackFlags flags));
+V8_DEPRECATE_SOON("Use GCCallBack instead",
+ typedef void (*GCEpilogueCallback)(GCType type,
+ GCCallbackFlags flags));
+typedef void (*GCCallback)(GCType type, GCCallbackFlags flags);
typedef void (*InterruptCallback)(Isolate* isolate, void* data);
@@ -5370,8 +5362,6 @@ class V8_EXPORT Isolate {
*/
static Isolate* New(const CreateParams& params);
- static V8_DEPRECATED("Always pass CreateParams", Isolate* New());
-
/**
* Returns the entered isolate for the current thread or NULL in
* case there is no current isolate.
@@ -5381,19 +5371,6 @@ class V8_EXPORT Isolate {
static Isolate* GetCurrent();
/**
- * Custom callback used by embedders to help V8 determine if it should abort
- * when it throws and no internal handler is predicted to catch the
- * exception. If --abort-on-uncaught-exception is used on the command line,
- * then V8 will abort if either:
- * - no custom callback is set.
- * - the custom callback set returns true.
- * Otherwise, the custom callback will not be called and V8 will not abort.
- */
- typedef bool (*AbortOnUncaughtExceptionCallback)(Isolate*);
- void SetAbortOnUncaughtExceptionCallback(
- AbortOnUncaughtExceptionCallback callback);
-
- /**
* Methods below this point require holding a lock (using Locker) in
* a multi-threaded environment.
*/
@@ -5578,12 +5555,16 @@ class V8_EXPORT Isolate {
template<typename T, typename S>
void SetReference(const Persistent<T>& parent, const Persistent<S>& child);
- typedef void (*GCPrologueCallback)(Isolate* isolate,
- GCType type,
- GCCallbackFlags flags);
- typedef void (*GCEpilogueCallback)(Isolate* isolate,
- GCType type,
- GCCallbackFlags flags);
+ V8_DEPRECATE_SOON("Use GCCallBack instead",
+ typedef void (*GCPrologueCallback)(Isolate* isolate,
+ GCType type,
+ GCCallbackFlags flags));
+ V8_DEPRECATE_SOON("Use GCCallBack instead",
+ typedef void (*GCEpilogueCallback)(Isolate* isolate,
+ GCType type,
+ GCCallbackFlags flags));
+ typedef void (*GCCallback)(Isolate* isolate, GCType type,
+ GCCallbackFlags flags);
/**
* Enables the host application to receive a notification before a
@@ -5594,14 +5575,14 @@ class V8_EXPORT Isolate {
* not possible to register the same callback function two times with
* different GCType filters.
*/
- void AddGCPrologueCallback(
- GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
+ void AddGCPrologueCallback(GCCallback callback,
+ GCType gc_type_filter = kGCTypeAll);
/**
* This function removes callback which was installed by
* AddGCPrologueCallback function.
*/
- void RemoveGCPrologueCallback(GCPrologueCallback callback);
+ void RemoveGCPrologueCallback(GCCallback callback);
/**
* Enables the host application to receive a notification after a
@@ -5612,15 +5593,14 @@ class V8_EXPORT Isolate {
* not possible to register the same callback function two times with
* different GCType filters.
*/
- void AddGCEpilogueCallback(
- GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
+ void AddGCEpilogueCallback(GCCallback callback,
+ GCType gc_type_filter = kGCTypeAll);
/**
* This function removes callback which was installed by
* AddGCEpilogueCallback function.
*/
- void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
-
+ void RemoveGCEpilogueCallback(GCCallback callback);
/**
* Forcefully terminate the current thread of JavaScript execution
@@ -5985,16 +5965,6 @@ class V8_EXPORT V8 {
AllowCodeGenerationFromStringsCallback that));
/**
- * Set allocator to use for ArrayBuffer memory.
- * The allocator should be set only once. The allocator should be set
- * before any code tha uses ArrayBuffers is executed.
- * This allocator is used in all isolates.
- */
- static V8_DEPRECATE_SOON(
- "Use isolate version",
- void SetArrayBufferAllocator(ArrayBuffer::Allocator* allocator));
-
- /**
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
*/
@@ -6087,7 +6057,7 @@ class V8_EXPORT V8 {
*/
static V8_DEPRECATE_SOON(
"Use isolate version",
- void AddGCPrologueCallback(GCPrologueCallback callback,
+ void AddGCPrologueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll));
/**
@@ -6096,7 +6066,7 @@ class V8_EXPORT V8 {
*/
V8_INLINE static V8_DEPRECATE_SOON(
"Use isolate version",
- void RemoveGCPrologueCallback(GCPrologueCallback callback));
+ void RemoveGCPrologueCallback(GCCallback callback));
/**
* Enables the host application to receive a notification after a
@@ -6110,7 +6080,7 @@ class V8_EXPORT V8 {
*/
static V8_DEPRECATE_SOON(
"Use isolate version",
- void AddGCEpilogueCallback(GCEpilogueCallback callback,
+ void AddGCEpilogueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll));
/**
@@ -6119,7 +6089,7 @@ class V8_EXPORT V8 {
*/
V8_INLINE static V8_DEPRECATE_SOON(
"Use isolate version",
- void RemoveGCEpilogueCallback(GCEpilogueCallback callback));
+ void RemoveGCEpilogueCallback(GCCallback callback));
/**
* Enables the host application to provide a mechanism to be notified
@@ -6664,10 +6634,12 @@ class V8_EXPORT Context {
V8_INLINE Local<Value> GetEmbedderData(int index);
/**
- * Gets the exports object used by V8 extras. Extra natives get a reference
- * to this object and can use it to export functionality.
+ * Gets the binding object used by V8 extras. Extra natives get a reference
+ * to this object and can use it to "export" functionality by adding
+ * properties. Extra natives can also "import" functionality by accessing
+ * properties added by the embedder using the V8 API.
*/
- Local<Object> GetExtrasExportsObject();
+ Local<Object> GetExtrasBindingObject();
/**
* Sets the embedder data with the given index, growing the data as
@@ -6720,6 +6692,11 @@ class V8_EXPORT Context {
void SetErrorMessageForCodeGenerationFromStrings(Local<String> message);
/**
+ * Estimate the memory in bytes retained by this context.
+ */
+ size_t EstimatedSize();
+
+ /**
* Stack-allocated class which sets the execution context for all
* operations executed within a local scope.
*/
@@ -6966,12 +6943,12 @@ class Internals {
1 * kApiPointerSize + kApiIntSize;
static const int kStringResourceOffset = 3 * kApiPointerSize;
- static const int kOddballKindOffset = 3 * kApiPointerSize;
+ static const int kOddballKindOffset = 4 * kApiPointerSize;
static const int kForeignAddressOffset = kApiPointerSize;
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
static const int kContextHeaderSize = 2 * kApiPointerSize;
- static const int kContextEmbedderDataIndex = 81;
+ static const int kContextEmbedderDataIndex = 27;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
@@ -7004,7 +6981,7 @@ class Internals {
static const int kNodeIsIndependentShift = 3;
static const int kNodeIsPartiallyDependentShift = 4;
- static const int kJSObjectType = 0xbe;
+ static const int kJSObjectType = 0xb6;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
@@ -8291,17 +8268,17 @@ void V8::SetFatalErrorHandler(FatalErrorCallback callback) {
}
-void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+void V8::RemoveGCPrologueCallback(GCCallback callback) {
Isolate* isolate = Isolate::GetCurrent();
isolate->RemoveGCPrologueCallback(
- reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback));
+ reinterpret_cast<v8::Isolate::GCCallback>(callback));
}
-void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+void V8::RemoveGCEpilogueCallback(GCCallback callback) {
Isolate* isolate = Isolate::GetCurrent();
isolate->RemoveGCEpilogueCallback(
- reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback));
+ reinterpret_cast<v8::Isolate::GCCallback>(callback));
}
diff --git a/deps/v8/infra/project-config/README.md b/deps/v8/infra/project-config/README.md
deleted file mode 100644
index 34a89cef0a..0000000000
--- a/deps/v8/infra/project-config/README.md
+++ /dev/null
@@ -1 +0,0 @@
-This directory contains v8 project-wide configurations for infra services.
diff --git a/deps/v8/infra/project-config/cr-buildbucket.cfg b/deps/v8/infra/project-config/cr-buildbucket.cfg
deleted file mode 100644
index 544940bbaf..0000000000
--- a/deps/v8/infra/project-config/cr-buildbucket.cfg
+++ /dev/null
@@ -1,23 +0,0 @@
-# Defines buckets on cr-buildbucket.appspot.com, used to schedule builds
-# on buildbot. In particular, CQ uses some of these buckets to schedule tryjobs.
-#
-# See http://luci-config.appspot.com/schemas/projects:buildbucket.cfg for
-# schema of this file and documentation.
-#
-# Please keep this list sorted by bucket name.
-
-buckets {
- name: "master.tryserver.v8"
- acls {
- role: READER
- group: "all"
- }
- acls {
- role: SCHEDULER
- group: "service-account-cq"
- }
- acls {
- role: WRITER
- group: "service-account-v8-master"
- }
-}
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index 6f7a47f1b0..cfbd054c16 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -666,11 +666,13 @@ StringHttpRequest kSampleRequests[kSampleSize] = {
};
-bool ProcessEntries(HttpRequestProcessor* processor, int count,
- StringHttpRequest* reqs) {
+bool ProcessEntries(v8::Platform* platform, HttpRequestProcessor* processor,
+ int count, StringHttpRequest* reqs) {
for (int i = 0; i < count; i++) {
- if (!processor->Process(&reqs[i]))
- return false;
+ bool result = processor->Process(&reqs[i]);
+ while (v8::platform::PumpMessageLoop(platform, Isolate::GetCurrent()))
+ continue;
+ if (!result) return false;
}
return true;
}
@@ -714,7 +716,7 @@ int main(int argc, char* argv[]) {
fprintf(stderr, "Error initializing processor.\n");
return 1;
}
- if (!ProcessEntries(&processor, kSampleSize, kSampleRequests))
+ if (!ProcessEntries(platform, &processor, kSampleSize, kSampleRequests))
return 1;
PrintMap(&output);
}
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index bd621c5465..ad22285084 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -45,8 +45,9 @@
v8::Local<v8::Context> CreateShellContext(v8::Isolate* isolate);
-void RunShell(v8::Local<v8::Context> context);
-int RunMain(v8::Isolate* isolate, int argc, char* argv[]);
+void RunShell(v8::Local<v8::Context> context, v8::Platform* platform);
+int RunMain(v8::Isolate* isolate, v8::Platform* platform, int argc,
+ char* argv[]);
bool ExecuteString(v8::Isolate* isolate, v8::Local<v8::String> source,
v8::Local<v8::Value> name, bool print_result,
bool report_exceptions);
@@ -95,8 +96,8 @@ int main(int argc, char* argv[]) {
return 1;
}
v8::Context::Scope context_scope(context);
- result = RunMain(isolate, argc, argv);
- if (run_shell) RunShell(context);
+ result = RunMain(isolate, platform, argc, argv);
+ if (run_shell) RunShell(context, platform);
}
isolate->Dispose();
v8::V8::Dispose();
@@ -270,7 +271,8 @@ v8::MaybeLocal<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
// Process remaining command line arguments and execute files
-int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
+int RunMain(v8::Isolate* isolate, v8::Platform* platform, int argc,
+ char* argv[]) {
for (int i = 1; i < argc; i++) {
const char* str = argv[i];
if (strcmp(str, "--shell") == 0) {
@@ -293,7 +295,9 @@ int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
.ToLocal(&source)) {
return 1;
}
- if (!ExecuteString(isolate, source, file_name, false, true)) return 1;
+ bool success = ExecuteString(isolate, source, file_name, false, true);
+ while (v8::platform::PumpMessageLoop(platform, isolate)) continue;
+ if (!success) return 1;
} else {
// Use all other arguments as names of files to load and run.
v8::Local<v8::String> file_name =
@@ -304,7 +308,9 @@ int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
fprintf(stderr, "Error reading '%s'\n", str);
continue;
}
- if (!ExecuteString(isolate, source, file_name, false, true)) return 1;
+ bool success = ExecuteString(isolate, source, file_name, false, true);
+ while (v8::platform::PumpMessageLoop(platform, isolate)) continue;
+ if (!success) return 1;
}
}
return 0;
@@ -312,7 +318,7 @@ int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
// The read-eval-execute loop of the shell.
-void RunShell(v8::Local<v8::Context> context) {
+void RunShell(v8::Local<v8::Context> context, v8::Platform* platform) {
fprintf(stderr, "V8 version %s [sample shell]\n", v8::V8::GetVersion());
static const int kBufferSize = 256;
// Enter the execution environment before evaluating any code.
@@ -331,6 +337,8 @@ void RunShell(v8::Local<v8::Context> context) {
v8::String::NewFromUtf8(context->GetIsolate(), str,
v8::NewStringType::kNormal).ToLocalChecked(),
name, true, true);
+ while (v8::platform::PumpMessageLoop(platform, context->GetIsolate()))
+ continue;
}
fprintf(stderr, "\n");
}
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 4d320535f1..4b95456a17 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -2,11 +2,23 @@ include_rules = [
"+src",
"-src/compiler",
"+src/compiler/pipeline.h",
+ "-src/heap",
+ "+src/heap/heap.h",
+ "+src/heap/heap-inl.h",
+ "-src/interpreter",
+ "+src/interpreter/bytecodes.h",
+ "+src/interpreter/interpreter.h",
"-src/libplatform",
"-include/libplatform"
]
specific_include_rules = {
+ ".*\.h": [
+ # Note that src/v8.h is the top header for some .cc files, it shouldn't be
+ # included in any .h files though. In the long run we should make src/v8.h
+ # act like any normal header file, instead of a grab-bag include.
+ "-src/v8.h",
+ ],
"d8\.cc": [
"+include/libplatform/libplatform.h",
],
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index e77dedbaeb..dfaa938845 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -2,9 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/accessors.h"
+
#include "src/api.h"
#include "src/contexts.h"
#include "src/deoptimizer.h"
@@ -100,37 +99,22 @@ bool Accessors::IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
Isolate* isolate = name->GetIsolate();
switch (map->instance_type()) {
- case JS_TYPED_ARRAY_TYPE: {
- if (!CheckForName(name, isolate->factory()->length_string(),
- JSTypedArray::kLengthOffset, object_offset) &&
- !CheckForName(name, isolate->factory()->byte_length_string(),
- JSTypedArray::kByteLengthOffset, object_offset) &&
- !CheckForName(name, isolate->factory()->byte_offset_string(),
- JSTypedArray::kByteOffsetOffset, object_offset)) {
+ case JS_TYPED_ARRAY_TYPE:
+ // %TypedArray%.prototype is non-configurable, and so are the following
+ // named properties on %TypedArray%.prototype, so we can directly inline
+ // the field-load for typed array maps that still use their
+ // %TypedArray%.prototype.
+ if (JSFunction::cast(map->GetConstructor())->prototype() !=
+ map->prototype()) {
return false;
}
+ return CheckForName(name, isolate->factory()->length_string(),
+ JSTypedArray::kLengthOffset, object_offset) ||
+ CheckForName(name, isolate->factory()->byte_length_string(),
+ JSTypedArray::kByteLengthOffset, object_offset) ||
+ CheckForName(name, isolate->factory()->byte_offset_string(),
+ JSTypedArray::kByteOffsetOffset, object_offset);
- if (map->is_dictionary_map()) return false;
-
- // Check if the property is overridden on the instance.
- DescriptorArray* descriptors = map->instance_descriptors();
- int descriptor = descriptors->SearchWithCache(*name, *map);
- if (descriptor != DescriptorArray::kNotFound) return false;
-
- Handle<Object> proto = Handle<Object>(map->prototype(), isolate);
- if (!proto->IsJSReceiver()) return false;
-
- // Check if the property is defined in the prototype chain.
- LookupIterator it(proto, name);
- if (!it.IsFound()) return false;
-
- Object* original_proto =
- JSFunction::cast(map->GetConstructor())->prototype();
-
- // Property is not configurable. It is enough to verify that
- // the holder is the same.
- return *it.GetHolder<Object>() == original_proto;
- }
case JS_DATA_VIEW_TYPE:
return CheckForName(name, isolate->factory()->byte_length_string(),
JSDataView::kByteLengthOffset, object_offset) ||
@@ -1012,7 +996,6 @@ MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
CHECK_EQ(LookupIterator::ACCESSOR, it.state());
DCHECK(it.HolderIsReceiverOrHiddenPrototype());
it.ReconfigureDataProperty(value, it.property_details().attributes());
- it.WriteDataValue(value);
if (is_observed && !old_value->SameValue(*value)) {
return JSObject::EnqueueChangeRecord(object, "update", name, old_value);
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 227af745b7..3c0079de89 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -5,12 +5,18 @@
#ifndef V8_ACCESSORS_H_
#define V8_ACCESSORS_H_
+#include "include/v8.h"
#include "src/allocation.h"
#include "src/globals.h"
+#include "src/handles.h"
+#include "src/property-details.h"
namespace v8 {
namespace internal {
+// Forward declarations.
+class ExecutableAccessorInfo;
+
// The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names.
#define ACCESSOR_INFO_LIST(V) \
diff --git a/deps/v8/src/allocation-tracker.cc b/deps/v8/src/allocation-tracker.cc
index f8617cfc1e..128d25c8d2 100644
--- a/deps/v8/src/allocation-tracker.cc
+++ b/deps/v8/src/allocation-tracker.cc
@@ -6,7 +6,7 @@
#include "src/allocation-tracker.h"
#include "src/frames-inl.h"
-#include "src/heap-snapshot-generator.h"
+#include "src/heap-snapshot-generator-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/allocation-tracker.h b/deps/v8/src/allocation-tracker.h
index f3788b91a6..c409f2437b 100644
--- a/deps/v8/src/allocation-tracker.h
+++ b/deps/v8/src/allocation-tracker.h
@@ -7,12 +7,21 @@
#include <map>
+#include "include/v8-profiler.h"
+#include "src/handles.h"
+#include "src/hashmap.h"
+#include "src/list.h"
+#include "src/vector.h"
+
namespace v8 {
namespace internal {
-class HeapObjectsMap;
-
+// Forward declarations.
class AllocationTraceTree;
+class AllocationTracker;
+class HeapObjectsMap;
+class SharedFunctionInfo;
+class StringsStorage;
class AllocationTraceNode {
public:
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
index 224c78a6ad..c5e398296c 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api-natives.h
@@ -6,10 +6,15 @@
#define V8_API_NATIVES_H_
#include "src/handles.h"
+#include "src/property-details.h"
namespace v8 {
namespace internal {
+// Forward declarations.
+class ObjectTemplateInfo;
+class TemplateInfo;
+
class ApiNatives {
public:
static const int kInitialFunctionCacheSize = 256;
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index a57171f8a3..fc88c5c203 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -20,17 +20,18 @@
#include "src/base/platform/time.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
+#include "src/char-predicates-inl.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
+#include "src/context-measure.h"
#include "src/contexts.h"
#include "src/conversions-inl.h"
#include "src/counters.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/global-handles.h"
-#include "src/heap/spaces.h"
#include "src/heap-profiler.h"
#include "src/heap-snapshot-generator-inl.h"
#include "src/icu_util.h"
@@ -51,6 +52,7 @@
#include "src/snapshot/snapshot.h"
#include "src/startup-data-util.h"
#include "src/unicode-inl.h"
+#include "src/v8.h"
#include "src/v8threads.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
@@ -368,14 +370,12 @@ StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
base::ElapsedTimer timer;
timer.Start();
Isolate::Scope isolate_scope(isolate);
- internal_isolate->set_creating_default_snapshot(true);
internal_isolate->Init(NULL);
Persistent<Context> context;
i::Snapshot::Metadata metadata;
{
HandleScope handle_scope(isolate);
Local<Context> new_context = Context::New(isolate);
- internal_isolate->set_creating_default_snapshot(false);
context.Reset(isolate, new_context);
if (custom_source != NULL) {
metadata.set_embeds_script(true);
@@ -384,16 +384,31 @@ StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
}
}
if (!context.IsEmpty()) {
- // Make sure all builtin scripts are cached.
- {
- HandleScope scope(isolate);
- for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
- internal_isolate->bootstrapper()->SourceLookup<i::Natives>(i);
- }
- }
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
internal_isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
+
+ // GC may have cleared weak cells, so compact any WeakFixedArrays
+ // found on the heap.
+ i::HeapIterator iterator(internal_isolate->heap(),
+ i::HeapIterator::kFilterUnreachable);
+ for (i::HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
+ if (o->IsPrototypeInfo()) {
+ i::Object* prototype_users =
+ i::PrototypeInfo::cast(o)->prototype_users();
+ if (prototype_users->IsWeakFixedArray()) {
+ i::WeakFixedArray* array = i::WeakFixedArray::cast(prototype_users);
+ array->Compact<i::JSObject::PrototypeRegistryCompactionCallback>();
+ }
+ } else if (o->IsScript()) {
+ i::Object* shared_list = i::Script::cast(o)->shared_function_infos();
+ if (shared_list->IsWeakFixedArray()) {
+ i::WeakFixedArray* array = i::WeakFixedArray::cast(shared_list);
+ array->Compact<i::WeakFixedArray::NullCallback>();
+ }
+ }
+ }
+
i::Object* raw_context = *v8::Utils::OpenPersistent(context);
context.Reset();
@@ -481,16 +496,9 @@ ResourceConstraints::ResourceConstraints()
max_old_space_size_(0),
max_executable_size_(0),
stack_limit_(NULL),
- max_available_threads_(0),
code_range_size_(0) { }
void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
- uint64_t virtual_memory_limit,
- uint32_t number_of_processors) {
- ConfigureDefaults(physical_memory, virtual_memory_limit);
-}
-
-void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit) {
#if V8_OS_ANDROID
// Android has higher physical memory requirements before raising the maximum
@@ -783,6 +791,7 @@ static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
bool can_grow,
const char* location) {
i::Handle<i::Context> env = Utils::OpenHandle(context);
+ i::Isolate* isolate = env->GetIsolate();
bool ok =
Utils::ApiCheck(env->IsNativeContext(),
location,
@@ -795,7 +804,8 @@ static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
return i::Handle<i::FixedArray>();
}
int new_size = i::Max(index, data->length() << 1) + 1;
- data = i::FixedArray::CopySize(data, new_size);
+ int grow_by = new_size - data->length();
+ data = isolate->factory()->CopyFixedArrayAndGrow(data, grow_by);
env->set_embedder_data(*data);
return data;
}
@@ -1813,13 +1823,6 @@ MaybeLocal<Script> ScriptCompiler::CompileModule(Local<Context> context,
}
-Local<Script> ScriptCompiler::CompileModule(Isolate* v8_isolate, Source* source,
- CompileOptions options) {
- auto context = v8_isolate->GetCurrentContext();
- RETURN_TO_LOCAL_UNCHECKED(CompileModule(context, source, options), Script);
-}
-
-
class IsIdentifierHelper {
public:
IsIdentifierHelper() : is_identifier_(false), first_char_(true) {}
@@ -1916,11 +1919,27 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
context = factory->NewWithContext(closure, context, extension);
}
+ i::Handle<i::Object> name_obj;
+ int line_offset = 0;
+ int column_offset = 0;
+ if (!source->resource_name.IsEmpty()) {
+ name_obj = Utils::OpenHandle(*(source->resource_name));
+ }
+ if (!source->resource_line_offset.IsEmpty()) {
+ line_offset = static_cast<int>(source->resource_line_offset->Value());
+ }
+ if (!source->resource_column_offset.IsEmpty()) {
+ column_offset = static_cast<int>(source->resource_column_offset->Value());
+ }
i::Handle<i::JSFunction> fun;
- has_pending_exception =
- !i::Compiler::GetFunctionFromEval(
- source_string, outer_info, context, i::SLOPPY,
- i::ONLY_SINGLE_FUNCTION_LITERAL, scope_position).ToHandle(&fun);
+ has_pending_exception = !i::Compiler::GetFunctionFromEval(
+ source_string, outer_info, context, i::SLOPPY,
+ i::ONLY_SINGLE_FUNCTION_LITERAL, line_offset,
+ column_offset - scope_position, name_obj,
+ source->resource_options).ToHandle(&fun);
+ if (has_pending_exception) {
+ isolate->ReportPendingMessages();
+ }
RETURN_ON_FAILED_EXECUTION(Function);
i::Handle<i::Object> result;
@@ -1983,11 +2002,11 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
// Do the parsing tasks which need to be done on the main thread. This will
// also handle parse errors.
source->parser->Internalize(isolate, script,
- source->info->function() == nullptr);
+ source->info->literal() == nullptr);
source->parser->HandleSourceURLComments(isolate, script);
i::Handle<i::SharedFunctionInfo> result;
- if (source->info->function() != nullptr) {
+ if (source->info->literal() != nullptr) {
// Parsing has succeeded.
result = i::Compiler::CompileStreamedScript(script, source->info.get(),
str->length());
@@ -2256,31 +2275,15 @@ v8::Local<v8::StackTrace> Message::GetStackTrace() const {
}
-MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction(
- i::Isolate* isolate, const char* name, i::Handle<i::Object> recv, int argc,
- i::Handle<i::Object> argv[]) {
- i::Handle<i::Object> object_fun =
- i::Object::GetProperty(
- isolate, isolate->js_builtins_object(), name).ToHandleChecked();
- i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(object_fun);
- return i::Execution::Call(isolate, fun, recv, argc, argv);
-}
-
-
-MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction(
- i::Isolate* isolate, const char* name, i::Handle<i::Object> data) {
- i::Handle<i::Object> argv[] = { data };
- return CallV8HeapFunction(isolate, name, isolate->js_builtins_object(),
- arraysize(argv), argv);
-}
-
-
Maybe<int> Message::GetLineNumber(Local<Context> context) const {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetLineNumber()", int);
+ i::Handle<i::JSFunction> fun = isolate->message_get_line_number();
+ i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
+ i::Handle<i::Object> args[] = {Utils::OpenHandle(this)};
i::Handle<i::Object> result;
has_pending_exception =
- !CallV8HeapFunction(isolate, "$messageGetLineNumber",
- Utils::OpenHandle(this)).ToHandle(&result);
+ !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
return Just(static_cast<int>(result->Number()));
}
@@ -2307,13 +2310,15 @@ int Message::GetEndPosition() const {
Maybe<int> Message::GetStartColumn(Local<Context> context) const {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetStartColumn()",
int);
- auto self = Utils::OpenHandle(this);
- i::Handle<i::Object> start_col_obj;
+ i::Handle<i::JSFunction> fun = isolate->message_get_column_number();
+ i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
+ i::Handle<i::Object> args[] = {Utils::OpenHandle(this)};
+ i::Handle<i::Object> result;
has_pending_exception =
- !CallV8HeapFunction(isolate, "$messageGetPositionInLine", self)
- .ToHandle(&start_col_obj);
+ !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
- return Just(static_cast<int>(start_col_obj->Number()));
+ return Just(static_cast<int>(result->Number()));
}
@@ -2325,16 +2330,19 @@ int Message::GetStartColumn() const {
Maybe<int> Message::GetEndColumn(Local<Context> context) const {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetEndColumn()", int);
auto self = Utils::OpenHandle(this);
- i::Handle<i::Object> start_col_obj;
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetEndColumn()", int);
+ i::Handle<i::JSFunction> fun = isolate->message_get_column_number();
+ i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
+ i::Handle<i::Object> args[] = {self};
+ i::Handle<i::Object> result;
has_pending_exception =
- !CallV8HeapFunction(isolate, "$messageGetPositionInLine", self)
- .ToHandle(&start_col_obj);
+ !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
int start = self->start_position();
int end = self->end_position();
- return Just(static_cast<int>(start_col_obj->Number()) + (end - start));
+ return Just(static_cast<int>(result->Number()) + (end - start));
}
@@ -2368,10 +2376,13 @@ bool Message::IsOpaque() const {
MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
PREPARE_FOR_EXECUTION(context, "v8::Message::GetSourceLine()", String);
+ i::Handle<i::JSFunction> fun = isolate->message_get_source_line();
+ i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
+ i::Handle<i::Object> args[] = {Utils::OpenHandle(this)};
i::Handle<i::Object> result;
has_pending_exception =
- !CallV8HeapFunction(isolate, "$messageGetSourceLine",
- Utils::OpenHandle(this)).ToHandle(&result);
+ !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(String);
Local<String> str;
if (result->IsString()) {
@@ -2787,34 +2798,23 @@ bool Value::IsUint32() const {
}
-static bool CheckConstructor(i::Isolate* isolate,
- i::Handle<i::JSObject> obj,
- const char* class_name) {
- i::Handle<i::Object> constr(obj->map()->GetConstructor(), isolate);
- if (!constr->IsJSFunction()) return false;
- i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(constr);
- return func->shared()->native() && constr.is_identical_to(
- i::Object::GetProperty(isolate,
- isolate->js_builtins_object(),
- class_name).ToHandleChecked());
-}
-
-
bool Value::IsNativeError() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsJSObject()) {
- i::Handle<i::JSObject> js_obj(i::JSObject::cast(*obj));
- i::Isolate* isolate = js_obj->GetIsolate();
- return CheckConstructor(isolate, js_obj, "$Error") ||
- CheckConstructor(isolate, js_obj, "$EvalError") ||
- CheckConstructor(isolate, js_obj, "$RangeError") ||
- CheckConstructor(isolate, js_obj, "$ReferenceError") ||
- CheckConstructor(isolate, js_obj, "$SyntaxError") ||
- CheckConstructor(isolate, js_obj, "$TypeError") ||
- CheckConstructor(isolate, js_obj, "$URIError");
- } else {
- return false;
- }
+ if (!obj->IsJSObject()) return false;
+ i::Handle<i::JSObject> js_obj = i::Handle<i::JSObject>::cast(obj);
+ i::Isolate* isolate = js_obj->GetIsolate();
+ i::Handle<i::Object> constructor(js_obj->map()->GetConstructor(), isolate);
+ if (!constructor->IsJSFunction()) return false;
+ i::Handle<i::JSFunction> function =
+ i::Handle<i::JSFunction>::cast(constructor);
+ if (!function->shared()->native()) return false;
+ return function.is_identical_to(isolate->error_function()) ||
+ function.is_identical_to(isolate->eval_error_function()) ||
+ function.is_identical_to(isolate->range_error_function()) ||
+ function.is_identical_to(isolate->reference_error_function()) ||
+ function.is_identical_to(isolate->syntax_error_function()) ||
+ function.is_identical_to(isolate->type_error_function()) ||
+ function.is_identical_to(isolate->uri_error_function());
}
@@ -3372,9 +3372,11 @@ Maybe<bool> Value::Equals(Local<Context> context, Local<Value> that) const {
}
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Value::Equals()", bool);
i::Handle<i::Object> args[] = { other };
+ i::Handle<i::JSFunction> fun(i::JSFunction::cast(
+ isolate->js_builtins_object()->javascript_builtin(i::Builtins::EQUALS)));
i::Handle<i::Object> result;
has_pending_exception =
- !CallV8HeapFunction(isolate, "EQUALS", self, arraysize(args), args)
+ !i::Execution::Call(isolate, fun, self, arraysize(args), args)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(*result == i::Smi::FromInt(i::EQUAL));
@@ -3397,33 +3399,9 @@ bool Value::Equals(Local<Value> that) const {
bool Value::StrictEquals(Local<Value> that) const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> other = Utils::OpenHandle(*that);
- if (obj->IsSmi()) {
- return other->IsNumber() && obj->Number() == other->Number();
- }
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- LOG_API(isolate, "StrictEquals");
- // Must check HeapNumber first, since NaN !== NaN.
- if (obj->IsHeapNumber()) {
- if (!other->IsNumber()) return false;
- double x = obj->Number();
- double y = other->Number();
- // Must check explicitly for NaN:s on Windows, but -0 works fine.
- return x == y && !std::isnan(x) && !std::isnan(y);
- } else if (*obj == *other) { // Also covers Booleans.
- return true;
- } else if (obj->IsSmi()) {
- return other->IsNumber() && obj->Number() == other->Number();
- } else if (obj->IsString()) {
- return other->IsString() &&
- i::String::Equals(i::Handle<i::String>::cast(obj),
- i::Handle<i::String>::cast(other));
- } else if (obj->IsUndefined() || obj->IsUndetectableObject()) {
- return other->IsUndefined() || other->IsUndetectableObject();
- } else {
- return false;
- }
+ auto self = Utils::OpenHandle(this);
+ auto other = Utils::OpenHandle(*that);
+ return self->StrictEquals(*other);
}
@@ -3459,8 +3437,8 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
auto self = Utils::OpenHandle(this);
auto value_obj = Utils::OpenHandle(*value);
- has_pending_exception =
- i::JSReceiver::SetElement(self, index, value_obj, i::SLOPPY).is_null();
+ has_pending_exception = i::Object::SetElement(isolate, self, index, value_obj,
+ i::SLOPPY).is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@@ -3528,11 +3506,12 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
i::Handle<i::JSArray> desc_array =
isolate->factory()->NewJSArrayWithElements(desc, i::FAST_ELEMENTS, 3);
i::Handle<i::Object> args[] = {self, key_obj, value_obj, desc_array};
+ i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
+ i::Handle<i::JSFunction> fun = isolate->object_define_own_property();
i::Handle<i::Object> result;
has_pending_exception =
- !CallV8HeapFunction(isolate, "$objectDefineOwnProperty",
- isolate->factory()->undefined_value(),
- arraysize(args), args).ToHandle(&result);
+ !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(result->BooleanValue());
}
@@ -3664,11 +3643,12 @@ MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
auto obj = Utils::OpenHandle(this);
auto key_name = Utils::OpenHandle(*key);
i::Handle<i::Object> args[] = { obj, key_name };
+ i::Handle<i::JSFunction> fun = isolate->object_get_own_property_descriptor();
+ i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
i::Handle<i::Object> result;
has_pending_exception =
- !CallV8HeapFunction(isolate, "$objectGetOwnPropertyDescriptor",
- isolate->factory()->undefined_value(),
- arraysize(args), args).ToHandle(&result);
+ !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(Utils::ToLocal(result));
}
@@ -3798,7 +3778,8 @@ MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
isolate, self, toStringTag).ToHandle(&tag);
RETURN_ON_FAILED_EXECUTION(String);
if (tag->IsString()) {
- class_name = i::Handle<i::String>::cast(tag).EscapeFrom(&handle_scope);
+ class_name = Utils::OpenHandle(*handle_scope.Escape(
+ Utils::ToLocal(i::Handle<i::String>::cast(tag))));
}
}
const char* prefix = "[object ";
@@ -5123,7 +5104,6 @@ static inline int WriteHelper(const String* string,
ENTER_V8(isolate);
DCHECK(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(string);
- isolate->string_tracker()->RecordWrite(str);
if (options & String::HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringCharacterStream or Get(i) to access the characters.
@@ -5360,15 +5340,6 @@ void v8::V8::SetReturnAddressLocationResolver(
i::V8::SetReturnAddressLocationResolver(return_address_resolver);
}
-void v8::V8::SetArrayBufferAllocator(
- ArrayBuffer::Allocator* allocator) {
- if (!Utils::ApiCheck(i::V8::ArrayBufferAllocator() == NULL,
- "v8::V8::SetArrayBufferAllocator",
- "ArrayBufferAllocator might only be set once"))
- return;
- i::V8::SetArrayBufferAllocator(allocator);
-}
-
bool v8::V8::Dispose() {
i::V8::TearDown();
@@ -5557,11 +5528,11 @@ void Context::DetachGlobal() {
}
-Local<v8::Object> Context::GetExtrasExportsObject() {
+Local<v8::Object> Context::GetExtrasBindingObject() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
- i::Handle<i::JSObject> exports(context->extras_exports_object(), isolate);
- return Utils::ToLocal(exports);
+ i::Handle<i::JSObject> binding(context->extras_binding_object(), isolate);
+ return Utils::ToLocal(binding);
}
@@ -5587,6 +5558,12 @@ void Context::SetErrorMessageForCodeGenerationFromStrings(Local<String> error) {
}
+size_t Context::EstimatedSize() {
+ return static_cast<size_t>(
+ i::ContextMeasure(*Utils::OpenHandle(this)).Size());
+}
+
+
MaybeLocal<v8::Object> ObjectTemplate::NewInstance(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, "v8::ObjectTemplate::NewInstance()", Object);
auto self = Utils::OpenHandle(this);
@@ -5850,9 +5827,6 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
return false; // Already an external string.
}
ENTER_V8(isolate);
- if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
- return false;
- }
if (isolate->heap()->IsInGCPostProcessing()) {
return false;
}
@@ -5877,9 +5851,6 @@ bool v8::String::MakeExternal(
return false; // Already an external string.
}
ENTER_V8(isolate);
- if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
- return false;
- }
if (isolate->heap()->IsInGCPostProcessing()) {
return false;
}
@@ -5900,9 +5871,10 @@ bool v8::String::CanMakeExternal() {
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
- if (isolate->string_tracker()->IsFreshUnusedString(obj)) return false;
+ // Old space strings should be externalized.
+ if (!isolate->heap()->new_space()->Contains(*obj)) return true;
int size = obj->Size(); // Byte size of the original string.
- if (size < i::ExternalString::kShortSize) return false;
+ if (size <= i::ExternalString::kShortSize) return false;
i::StringShape shape(*obj);
return !shape.IsExternal();
}
@@ -6587,6 +6559,8 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
size_t byte_length,
ArrayBufferCreationMode mode) {
+ // Embedders must guarantee that the external backing store is valid.
+ CHECK(byte_length == 0 || data != NULL);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
ENTER_V8(i_isolate);
@@ -6784,6 +6758,8 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
Isolate* isolate, void* data, size_t byte_length,
ArrayBufferCreationMode mode) {
CHECK(i::FLAG_harmony_sharedarraybuffer);
+ // Embedders must guarantee that the external backing store is valid.
+ CHECK(byte_length == 0 || data != NULL);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, "v8::SharedArrayBuffer::New(void*, size_t)");
ENTER_V8(i_isolate);
@@ -6896,8 +6872,31 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
void Isolate::CollectAllGarbage(const char* gc_reason) {
- reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage(
- i::Heap::kNoGCFlags, gc_reason);
+ i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
+ DCHECK_EQ(heap->gc_state(), i::Heap::NOT_IN_GC);
+ if (heap->incremental_marking()->IsStopped()) {
+ if (heap->incremental_marking()->CanBeActivated()) {
+ heap->StartIncrementalMarking(
+ i::Heap::kNoGCFlags,
+ kGCCallbackFlagSynchronousPhantomCallbackProcessing, gc_reason);
+ } else {
+ heap->CollectAllGarbage(
+ i::Heap::kNoGCFlags, gc_reason,
+ kGCCallbackFlagSynchronousPhantomCallbackProcessing);
+ }
+ } else {
+ // Incremental marking is turned on an has already been started.
+
+ // TODO(mlippautz): Compute the time slice for incremental marking based on
+ // memory pressure.
+ double deadline = heap->MonotonicallyIncreasingTimeInMs() +
+ i::FLAG_external_allocation_limit_incremental_time;
+ heap->AdvanceIncrementalMarking(
+ 0, deadline, i::IncrementalMarking::StepActions(
+ i::IncrementalMarking::GC_VIA_STACK_GUARD,
+ i::IncrementalMarking::FORCE_MARKING,
+ i::IncrementalMarking::FORCE_COMPLETION));
+ }
}
@@ -6989,47 +6988,41 @@ void Isolate::SetReference(internal::Object** parent,
}
-void Isolate::AddGCPrologueCallback(GCPrologueCallback callback,
- GCType gc_type) {
+void Isolate::AddGCPrologueCallback(GCCallback callback, GCType gc_type) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->AddGCPrologueCallback(callback, gc_type);
}
-void Isolate::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+void Isolate::RemoveGCPrologueCallback(GCCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->RemoveGCPrologueCallback(callback);
}
-void Isolate::AddGCEpilogueCallback(GCEpilogueCallback callback,
- GCType gc_type) {
+void Isolate::AddGCEpilogueCallback(GCCallback callback, GCType gc_type) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
}
-void Isolate::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+void Isolate::RemoveGCEpilogueCallback(GCCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->RemoveGCEpilogueCallback(callback);
}
-void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
+void V8::AddGCPrologueCallback(GCCallback callback, GCType gc_type) {
i::Isolate* isolate = i::Isolate::Current();
isolate->heap()->AddGCPrologueCallback(
- reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback),
- gc_type,
- false);
+ reinterpret_cast<v8::Isolate::GCCallback>(callback), gc_type, false);
}
-void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
+void V8::AddGCEpilogueCallback(GCCallback callback, GCType gc_type) {
i::Isolate* isolate = i::Isolate::Current();
isolate->heap()->AddGCEpilogueCallback(
- reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback),
- gc_type,
- false);
+ reinterpret_cast<v8::Isolate::GCCallback>(callback), gc_type, false);
}
@@ -7096,20 +7089,11 @@ Isolate* Isolate::GetCurrent() {
}
-Isolate* Isolate::New() {
- Isolate::CreateParams create_params;
- return New(create_params);
-}
-
-
Isolate* Isolate::New(const Isolate::CreateParams& params) {
i::Isolate* isolate = new i::Isolate(false);
Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
- if (params.array_buffer_allocator != NULL) {
- isolate->set_array_buffer_allocator(params.array_buffer_allocator);
- } else {
- isolate->set_array_buffer_allocator(i::V8::ArrayBufferAllocator());
- }
+ CHECK(params.array_buffer_allocator != NULL);
+ isolate->set_array_buffer_allocator(params.array_buffer_allocator);
if (params.snapshot_blob != NULL) {
isolate->set_snapshot_blob(params.snapshot_blob);
} else {
@@ -7176,13 +7160,6 @@ void Isolate::Exit() {
}
-void Isolate::SetAbortOnUncaughtExceptionCallback(
- AbortOnUncaughtExceptionCallback callback) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->SetAbortOnUncaughtExceptionCallback(callback);
-}
-
-
Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
Isolate* isolate,
Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure)
@@ -7616,26 +7593,27 @@ String::Value::~Value() {
}
-#define DEFINE_ERROR(NAME) \
- Local<Value> Exception::NAME(v8::Local<v8::String> raw_message) { \
- i::Isolate* isolate = i::Isolate::Current(); \
- LOG_API(isolate, #NAME); \
- ENTER_V8(isolate); \
- i::Object* error; \
- { \
- i::HandleScope scope(isolate); \
- i::Handle<i::String> message = Utils::OpenHandle(*raw_message); \
- error = *isolate->factory()->NewError("$" #NAME, message); \
- } \
- i::Handle<i::Object> result(error, isolate); \
- return Utils::ToLocal(result); \
- }
-
-DEFINE_ERROR(RangeError)
-DEFINE_ERROR(ReferenceError)
-DEFINE_ERROR(SyntaxError)
-DEFINE_ERROR(TypeError)
-DEFINE_ERROR(Error)
+#define DEFINE_ERROR(NAME, name) \
+ Local<Value> Exception::NAME(v8::Local<v8::String> raw_message) { \
+ i::Isolate* isolate = i::Isolate::Current(); \
+ LOG_API(isolate, #NAME); \
+ ENTER_V8(isolate); \
+ i::Object* error; \
+ { \
+ i::HandleScope scope(isolate); \
+ i::Handle<i::String> message = Utils::OpenHandle(*raw_message); \
+ i::Handle<i::JSFunction> constructor = isolate->name##_function(); \
+ error = *isolate->factory()->NewError(constructor, message); \
+ } \
+ i::Handle<i::Object> result(error, isolate); \
+ return Utils::ToLocal(result); \
+ }
+
+DEFINE_ERROR(RangeError, range_error)
+DEFINE_ERROR(ReferenceError, reference_error)
+DEFINE_ERROR(SyntaxError, syntax_error)
+DEFINE_ERROR(TypeError, type_error)
+DEFINE_ERROR(Error, error)
#undef DEFINE_ERROR
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index b20ef5cf66..1229279598 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -5,13 +5,12 @@
#ifndef V8_API_H_
#define V8_API_H_
-#include "src/v8.h"
-
#include "include/v8-testing.h"
#include "src/contexts.h"
#include "src/factory.h"
#include "src/isolate.h"
-#include "src/list-inl.h"
+#include "src/list.h"
+#include "src/objects-inl.h"
namespace v8 {
@@ -309,17 +308,6 @@ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
template <class T>
-v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
- v8::EscapableHandleScope* scope) {
- v8::internal::Handle<T> handle;
- if (!is_null()) {
- handle = *this;
- }
- return Utils::OpenHandle(*scope->Escape(Utils::ToLocal(handle)), true);
-}
-
-
-template <class T>
inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
return reinterpret_cast<T*>(obj.location());
}
@@ -417,72 +405,6 @@ OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
namespace internal {
-// Tracks string usage to help make better decisions when
-// externalizing strings.
-//
-// Implementation note: internally this class only tracks fresh
-// strings and keeps a single use counter for them.
-class StringTracker {
- public:
- // Records that the given string's characters were copied to some
- // external buffer. If this happens often we should honor
- // externalization requests for the string.
- void RecordWrite(Handle<String> string) {
- Address address = reinterpret_cast<Address>(*string);
- Address top = isolate_->heap()->NewSpaceTop();
- if (IsFreshString(address, top)) {
- IncrementUseCount(top);
- }
- }
-
- // Estimates freshness and use frequency of the given string based
- // on how close it is to the new space top and the recorded usage
- // history.
- inline bool IsFreshUnusedString(Handle<String> string) {
- Address address = reinterpret_cast<Address>(*string);
- Address top = isolate_->heap()->NewSpaceTop();
- return IsFreshString(address, top) && IsUseCountLow(top);
- }
-
- private:
- StringTracker() : use_count_(0), last_top_(NULL), isolate_(NULL) { }
-
- static inline bool IsFreshString(Address string, Address top) {
- return top - kFreshnessLimit <= string && string <= top;
- }
-
- inline bool IsUseCountLow(Address top) {
- if (last_top_ != top) return true;
- return use_count_ < kUseLimit;
- }
-
- inline void IncrementUseCount(Address top) {
- if (last_top_ != top) {
- use_count_ = 0;
- last_top_ = top;
- }
- ++use_count_;
- }
-
- // Single use counter shared by all fresh strings.
- int use_count_;
-
- // Last new space top when the use count above was valid.
- Address last_top_;
-
- Isolate* isolate_;
-
- // How close to the new space top a fresh string has to be.
- static const int kFreshnessLimit = 1024;
-
- // The number of uses required to consider a string useful.
- static const int kUseLimit = 32;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(StringTracker);
-};
-
class DeferredHandles {
public:
diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc
index e7e51fed1f..a783357896 100644
--- a/deps/v8/src/arguments.cc
+++ b/deps/v8/src/arguments.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/arguments.h"
+
+#include "src/api.h"
#include "src/vm-state-inl.h"
namespace v8 {
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index c94014505a..ed995e7f58 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -269,9 +269,6 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
#endif
-#define DECLARE_RUNTIME_FUNCTION(Name) \
-Object* Name(int args_length, Object** args_object, Isolate* isolate)
-
#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
@@ -286,9 +283,6 @@ static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
#define RUNTIME_FUNCTION_RETURN_PAIR(Name) \
RUNTIME_FUNCTION_RETURNS_TYPE(ObjectPair, Name)
-#define RUNTIME_ARGUMENTS(isolate, args) \
- args.length(), args.arguments(), isolate
-
} } // namespace v8::internal
#endif // V8_ARGUMENTS_H_
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 4b4e1d3208..523000ec3a 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -40,7 +40,7 @@
#include "src/arm/assembler-arm.h"
#include "src/assembler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
namespace v8 {
@@ -97,7 +97,7 @@ DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
}
-void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
@@ -272,19 +272,18 @@ void RelocInfo::set_code_age_stub(Code* stub,
}
-Address RelocInfo::call_address() {
+Address RelocInfo::debug_call_address() {
// The 2 instructions offset assumes patched debug break slot or return
// sequence.
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ return Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset);
}
-void RelocInfo::set_call_address(Address target) {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+void RelocInfo::set_debug_call_address(Address target) {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset) =
+ target;
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -293,23 +292,6 @@ void RelocInfo::set_call_address(Address target) {
}
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -353,11 +335,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- isolate->debug()->has_break_points()) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
@@ -380,11 +359,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
@@ -504,11 +480,6 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
-Address Assembler::break_address_from_return_address(Address pc) {
- return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
-}
-
-
Address Assembler::return_address_from_call_start(Address pc) {
if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) |
IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 481a3b5ced..633b5d12c0 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -34,8 +34,6 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm-inl.h"
@@ -1326,7 +1324,8 @@ int Assembler::branch_offset(Label* L) {
// Block the emission of the constant pool, since the branch instruction must
// be emitted at the pc offset recorded by the label.
- BlockConstPoolFor(1);
+ if (!is_const_pool_blocked()) BlockConstPoolFor(1);
+
return target_pos - (pc_offset() + kPcLoadDelta);
}
@@ -2573,6 +2572,12 @@ void Assembler::vmov(const DwVfpRegister dst,
double imm,
const Register scratch) {
uint32_t enc;
+ // If the embedded constant pool is disabled, we can use the normal, inline
+ // constant pool. If the embedded constant pool is enabled (via
+ // FLAG_enable_embedded_constant_pool), we can only use it where the pool
+ // pointer (pp) is valid.
+ bool can_use_pool =
+ !FLAG_enable_embedded_constant_pool || is_constant_pool_available();
if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
//
@@ -2583,7 +2588,7 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
+ } else if (FLAG_enable_vldr_imm && can_use_pool) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
@@ -3588,11 +3593,10 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
- // No relocation info should be pending while using db. db is used
- // to write pure data with no pointers and the constant pool should
- // be emitted before using db.
- DCHECK(num_pending_32_bit_constants_ == 0);
- DCHECK(num_pending_64_bit_constants_ == 0);
+ // db is used to write raw data. The constant pool should be emitted or
+ // blocked before using db.
+ DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
+ DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -3600,11 +3604,10 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data) {
- // No relocation info should be pending while using dd. dd is used
- // to write pure data with no pointers and the constant pool should
- // be emitted before using dd.
- DCHECK(num_pending_32_bit_constants_ == 0);
- DCHECK(num_pending_64_bit_constants_ == 0);
+ // dd is used to write raw data. The constant pool should be emitted or
+ // blocked before using dd.
+ DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
+ DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
@@ -3612,11 +3615,10 @@ void Assembler::dd(uint32_t data) {
void Assembler::dq(uint64_t value) {
- // No relocation info should be pending while using dq. dq is used
- // to write pure data with no pointers and the constant pool should
- // be emitted before using dd.
- DCHECK(num_pending_32_bit_constants_ == 0);
- DCHECK(num_pending_64_bit_constants_ == 0);
+ // dq is used to write raw data. The constant pool should be emitted or
+ // blocked before using dq.
+ DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
+ DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
CheckBuffer();
*reinterpret_cast<uint64_t*>(pc_) = value;
pc_ += sizeof(uint64_t);
@@ -3755,11 +3757,13 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
int size_up_to_marker = jump_instr + kInstrSize;
int estimated_size_after_marker =
num_pending_32_bit_constants_ * kPointerSize;
+ bool has_int_values = (num_pending_32_bit_constants_ > 0);
bool has_fp_values = (num_pending_64_bit_constants_ > 0);
bool require_64_bit_align = false;
if (has_fp_values) {
- require_64_bit_align = IsAligned(
- reinterpret_cast<intptr_t>(pc_ + size_up_to_marker), kDoubleAlignment);
+ require_64_bit_align =
+ !IsAligned(reinterpret_cast<intptr_t>(pc_ + size_up_to_marker),
+ kDoubleAlignment);
if (require_64_bit_align) {
estimated_size_after_marker += kInstrSize;
}
@@ -3776,9 +3780,11 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// * the instruction doesn't require a jump after itself to jump over the
// constant pool, and we're getting close to running out of range.
if (!force_emit) {
- DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
+ DCHECK(has_fp_values || has_int_values);
bool need_emit = false;
if (has_fp_values) {
+ // The 64-bit constants are always emitted before the 32-bit constants, so
+ // we can ignore the effect of the 32-bit constants on estimated_size.
int dist64 = pc_offset() + estimated_size -
num_pending_32_bit_constants_ * kPointerSize -
first_const_pool_64_use_;
@@ -3787,10 +3793,12 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
need_emit = true;
}
}
- int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
- if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
- (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
- need_emit = true;
+ if (has_int_values) {
+ int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
+ if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
+ (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
+ need_emit = true;
+ }
}
if (!need_emit) return;
}
@@ -3839,7 +3847,10 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
bind(&size_check);
// Emit jump over constant pool if necessary.
- if (require_jump) b(size - kPcLoadDelta);
+ Label after_pool;
+ if (require_jump) {
+ b(&after_pool);
+ }
// Put down constant pool marker "Undefined instruction".
// The data size helps disassembly know what to print.
@@ -3923,6 +3934,10 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
RecordComment("]");
DCHECK_EQ(size, SizeOfCodeGeneratedSince(&size_check));
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
}
// Since a constant pool was just emitted, move the check offset forward by
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 5d66c39a77..d0fcac206e 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -737,9 +737,6 @@ class Assembler : public AssemblerBase {
// in the instruction stream that the call will return from.
INLINE(static Address return_address_from_call_start(Address pc));
- // Return the code target address of the patch debug break slot
- INLINE(static Address break_address_from_return_address(Address pc));
-
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -758,30 +755,18 @@ class Assembler : public AssemblerBase {
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- // Patched return sequence is:
- // ldr ip, [pc, #0] @ emited address and start
- // blx ip
- static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
-
// Distance between start of patched debug break slot and the emitted address
// to jump to.
// Patched debug break slot code is:
// ldr ip, [pc, #0] @ emited address and start
// blx ip
- static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
-
- static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize;
+ static const int kPatchDebugBreakSlotAddressOffset = 2 * kInstrSize;
// Difference between address of current opcode and value read from pc
// register.
static const int kPcLoadDelta = 8;
- static const int kJSReturnSequenceInstructions = 4;
- static const int kJSReturnSequenceLength =
- kJSReturnSequenceInstructions * kInstrSize;
- static const int kDebugBreakSlotInstructions = 3;
+ static const int kDebugBreakSlotInstructions = 4;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
@@ -1354,11 +1339,11 @@ class Assembler : public AssemblerBase {
// Debugging
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
+ // Mark generator continuation.
+ void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot();
+ void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 2859f97dc4..cf91753e1a 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/codegen.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -311,39 +309,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
-static void Generate_Runtime_NewObject(MacroAssembler* masm,
- bool create_memento,
- Register original_constructor,
- Label* count_incremented,
- Label* allocated) {
- if (create_memento) {
- // Get the cell or allocation site.
- __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
- __ push(r2);
- }
-
- __ push(r1); // argument for Runtime_NewObject
- __ push(original_constructor); // original constructor
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ CallRuntime(Runtime::kNewObject, 2);
- }
- __ mov(r4, r0);
-
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- if (create_memento) {
- __ jmp(count_incremented);
- } else {
- __ jmp(allocated);
- }
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -363,32 +330,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
- if (create_memento) {
- __ AssertUndefinedOrAllocationSite(r2, r4);
- __ push(r2);
- }
-
// Preserve the incoming parameters on the stack.
+ __ AssertUndefinedOrAllocationSite(r2, r4);
+ __ push(r2);
__ SmiTag(r0);
__ push(r0);
__ push(r1);
- if (use_new_target) {
- __ push(r3);
- }
-
- Label rt_call, allocated, normal_new, count_incremented;
- __ cmp(r1, r3);
- __ b(eq, &normal_new);
-
- // Original constructor and function are different.
- Generate_Runtime_NewObject(masm, create_memento, r3, &count_incremented,
- &allocated);
- __ bind(&normal_new);
+ __ push(r3);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
if (FLAG_inline_new) {
- Label undo_allocation;
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate);
__ mov(r2, Operand(debug_step_in_fp));
@@ -396,11 +349,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ tst(r2, r2);
__ b(ne, &rt_call);
+ // Fall back to runtime if the original constructor and function differ.
+ __ cmp(r1, r3);
+ __ b(ne, &rt_call);
+
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(r2, &rt_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ CompareObjectType(r2, r5, r4, MAP_TYPE);
__ b(ne, &rt_call);
// Check that the constructor is not constructing a JSFunction (see
@@ -408,7 +365,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map's instance type would be JS_FUNCTION_TYPE.
// r1: constructor function
// r2: initial map
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
+ __ CompareInstanceType(r2, r5, JS_FUNCTION_TYPE);
__ b(eq, &rt_call);
if (!is_api_function) {
@@ -439,12 +396,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Now allocate the JSObject on the heap.
// r1: constructor function
// r2: initial map
+ Label rt_call_reload_new_target;
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
if (create_memento) {
__ add(r3, r3, Operand(AllocationMemento::kSize / kPointerSize));
}
- __ Allocate(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+ __ Allocate(r3, r4, r5, r6, &rt_call_reload_new_target, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
@@ -481,8 +439,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate object with a slack.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ __ Ubfx(r0, r0, Map::kInObjectPropertiesOrConstructorFunctionIndexByte *
+ kBitsPerByte,
+ kBitsPerByte);
+ __ ldr(r2, FieldMemOperand(r2, Map::kInstanceAttributesOffset));
+ __ Ubfx(r2, r2, Map::kUnusedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
+ __ sub(r0, r0, Operand(r2));
__ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
// r0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
@@ -509,7 +472,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
// Load the AllocationSite
- __ ldr(r6, MemOperand(sp, 2 * kPointerSize));
+ __ ldr(r6, MemOperand(sp, 3 * kPointerSize));
+ __ AssertUndefinedOrAllocationSite(r6, r0);
DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
} else {
@@ -518,104 +482,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
+ // and jump into the continuation code at any time from now on.
__ add(r4, r4, Operand(kHeapObjectTag));
- // Check if a non-empty properties array is needed. Continue with
- // allocated object if not; allocate and initialize a FixedArray if yes.
- // r1: constructor function
- // r4: JSObject
- // r5: start of next object (not tagged)
- __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields
- // and in-object properties.
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ add(r3, r3, Operand(r6));
- __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
- kBitsPerByte);
- __ sub(r3, r3, Operand(r6), SetCC);
-
- // Done if no extra properties are to be allocated.
- __ b(eq, &allocated);
- __ Assert(pl, kPropertyAllocationCountFailed);
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: start of next object
- __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ Allocate(
- r0,
- r5,
- r6,
- r2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
- __ mov(r2, r5);
- DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
- DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ SmiTag(r0, r3);
- __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
-
- // Initialize the fields to undefined.
- // r1: constructor function
- // r2: First element of FixedArray (not tagged)
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ InitializeFieldsWithFiller(r2, r6, r0);
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // r1: constructor function
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
-
// Continue with JSObject being successfully allocated
- // r1: constructor function
// r4: JSObject
__ jmp(&allocated);
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // r4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(r4, r5);
+ // Reload the original constructor and fall-through.
+ __ bind(&rt_call_reload_new_target);
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
}
// Allocate the new receiver object using the runtime call.
// r1: constructor function
+ // r3: original constructor
__ bind(&rt_call);
- Generate_Runtime_NewObject(masm, create_memento, r1, &count_incremented,
- &allocated);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
+ __ push(r2); // argument 1: allocation site
+ }
+
+ __ push(r1); // argument 2/1: constructor function
+ __ push(r3); // argument 3/2: original constructor
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ mov(r4, r0);
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
// Receiver for constructor call allocated.
// r4: JSObject
__ bind(&allocated);
if (create_memento) {
- int offset = (use_new_target ? 3 : 2) * kPointerSize;
- __ ldr(r2, MemOperand(sp, offset));
+ __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r5);
__ b(eq, &count_incremented);
@@ -630,9 +540,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore the parameters.
- if (use_new_target) {
- __ pop(r3);
- }
+ __ pop(r3);
__ pop(r1);
// Retrieve smi-tagged arguments count from the stack.
@@ -641,9 +549,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Push new.target onto the construct frame. This is stored just below the
// receiver on the stack.
- if (use_new_target) {
- __ push(r3);
- }
+ __ push(r3);
__ push(r4);
__ push(r4);
@@ -657,8 +563,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target (if used)
- // sp[2/3]: number of arguments (smi-tagged)
+ // sp[2]: new.target
+ // sp[3]: number of arguments (smi-tagged)
Label loop, entry;
__ SmiTag(r3, r0);
__ b(&entry);
@@ -683,17 +589,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- // TODO(arv): Remove the "!use_new_target" before supporting optimization
- // of functions that reference new.target
- if (!is_api_function && !use_new_target) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r0: result
// sp[0]: receiver
- // sp[1]: new.target (if used)
- // sp[1/2]: number of arguments (smi-tagged)
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@@ -703,9 +607,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (if used)
- // sp[1/2]: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
__ JumpIfSmi(r0, &use_receiver);
// If the type of the result (stored in its map) is less than
@@ -723,10 +627,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&exit);
// r0: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (if used)
- // sp[1/2]: number of arguments (smi-tagged)
- int offset = (use_new_target ? 2 : 1) * kPointerSize;
- __ ldr(r1, MemOperand(sp, offset));
+ // sp[1]: new.target (original constructor)
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
// Leave construct frame.
}
@@ -739,17 +642,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, true, false);
}
@@ -763,12 +661,12 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- // TODO(dslomov): support pretenuring
- CHECK(!FLAG_pretenuring_call_new);
-
{
FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+ __ AssertUndefinedOrAllocationSite(r2, r4);
+ __ push(r2);
+
__ mov(r4, r0);
__ SmiTag(r4);
__ push(r4); // Smi-tagged arguments count.
@@ -970,6 +868,147 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o r1: the JS function object being called.
+// o cp: our context
+// o pp: the caller's constant pool pointer (if enabled)
+// o fp: the caller's frame pointer
+// o sp: stack pointer
+// o lr: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-arm.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushFixedFrame(r1);
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+ // Get the bytecode array from the function object and load the pointer to the
+ // first entry into kInterpreterBytecodeRegister.
+ __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ ldr(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ sub(r9, sp, Operand(r4));
+ __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+ __ cmp(r9, Operand(r2));
+ __ b(hs, &ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ Label loop_header;
+ Label loop_check;
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ __ b(&loop_check, al);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ push(r9);
+ // Continue loop if not done.
+ __ bind(&loop_check);
+ __ sub(r4, r4, Operand(kPointerSize), SetCC);
+ __ b(&loop_header, ge);
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ bind(&ok);
+ }
+
+ // Load accumulator, register file, bytecode offset, dispatch table into
+ // registers.
+ __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ sub(kInterpreterRegisterFileRegister, fp,
+ Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Dispatch to the first bytecode handler for the function.
+ __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
+ kPointerSizeLog2));
+ // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
+ // and header removal.
+ __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(ip);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // The return value is in accumulator, which is already in r0.
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Drop receiver + arguments.
+ __ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Jump(lr);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -1282,8 +1321,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ SmiTag(r0);
__ push(r0);
- __ push(r2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(r0, r2);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(r2, r0);
__ pop(r0);
@@ -1396,6 +1436,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int vectorOffset,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
@@ -1413,12 +1454,9 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
- FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
- Handle<TypeFeedbackVector> feedback_vector =
- masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
- int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
- __ mov(slot, Operand(Smi::FromInt(index)));
- __ Move(vector, feedback_vector);
+ int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
+ __ mov(slot, Operand(Smi::FromInt(slot_index)));
+ __ ldr(vector, MemOperand(fp, vectorOffset));
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1453,6 +1491,13 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
const int kReceiverOffset = kArgumentsOffset + kPointerSize;
const int kFunctionOffset = kReceiverOffset + kPointerSize;
+ const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(r1);
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r0);
@@ -1467,10 +1512,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
Generate_CheckStackOverflow(masm, kFunctionOffset, r0, kArgcIsSmiTagged);
// Push current limit and index.
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ push(r0); // limit
__ mov(r1, Operand::Zero()); // initial index
__ push(r1);
@@ -1519,8 +1562,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
// Convert the receiver to a regular object.
// r0: receiver
__ bind(&call_to_object);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ b(&push_receiver);
__ bind(&use_global_proxy);
@@ -1533,8 +1576,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ push(r0);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
@@ -1573,6 +1616,13 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+ static const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(r1);
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
@@ -1595,33 +1645,28 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
Generate_CheckStackOverflow(masm, kFunctionOffset, r0, kArgcIsSmiTagged);
// Push current limit and index.
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ push(r0); // limit
__ mov(r1, Operand::Zero()); // initial index
__ push(r1);
- // Push newTarget and callee functions
- __ ldr(r0, MemOperand(fp, kNewTargetOffset));
- __ push(r0);
+ // Push the constructor function as callee.
__ ldr(r0, MemOperand(fp, kFunctionOffset));
__ push(r0);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Use undefined feedback vector
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ ldr(r4, MemOperand(fp, kNewTargetOffset));
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
// Leave internal frame.
}
__ add(sp, sp, Operand(kStackSize * kPointerSize));
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 005fb97513..8193816c84 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/base/bits.h"
@@ -14,8 +12,8 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -33,7 +31,7 @@ static void InitializeArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -49,7 +47,7 @@ static void InitializeInternalArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -255,6 +253,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
+ __ b(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
@@ -273,6 +274,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
+ __ b(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics,
// since we need to throw a TypeError. Smis and heap numbers have
@@ -675,26 +679,30 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- if (cc == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq && strict()) {
+ __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc == lt || cc == le) {
- ncr = GREATER;
+ Builtins::JavaScript native;
+ if (cc == eq) {
+ native = Builtins::EQUALS;
} else {
- DCHECK(cc == gt || cc == ge); // remaining cases
- ncr = LESS;
+ native =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
+ } else {
+ DCHECK(cc == gt || cc == ge); // remaining cases
+ ncr = LESS;
+ }
+ __ mov(r0, Operand(Smi::FromInt(ncr)));
+ __ push(r0);
}
- __ mov(r0, Operand(Smi::FromInt(ncr)));
- __ push(r0);
- }
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -1583,7 +1591,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(r1);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments, 1, 1);
}
@@ -1831,10 +1839,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -2378,32 +2383,41 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
+ bool is_super) {
// r0 : number of arguments to the construct function
- // r2 : Feedback vector
- // r3 : slot in feedback vector (Smi)
// r1 : the function to call
+ // r2 : feedback vector
+ // r3 : slot in feedback vector (Smi)
+ // r4 : original constructor (for IsSuperConstructorCall)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r0);
__ Push(r3, r2, r1, r0);
+ if (is_super) {
+ __ Push(r4);
+ }
__ CallStub(stub);
+ if (is_super) {
+ __ Pop(r4);
+ }
__ Pop(r3, r2, r1, r0);
__ SmiUntag(r0);
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r0 : number of arguments to the construct function
// r1 : the function to call
- // r2 : Feedback vector
+ // r2 : feedback vector
// r3 : slot in feedback vector (Smi)
+ // r4 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2411,23 +2425,23 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into r4.
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
+ // Load the cache state into r5.
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ ldr(r5, FieldMemOperand(r5, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- // We don't know if r4 is a WeakCell or a Symbol, but it's harmless to read at
+ // We don't know if r5 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in type-feedback-vector.h).
Label check_allocation_site;
- Register feedback_map = r5;
- Register weak_value = r6;
- __ ldr(weak_value, FieldMemOperand(r4, WeakCell::kValueOffset));
+ Register feedback_map = r6;
+ Register weak_value = r9;
+ __ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
__ cmp(r1, weak_value);
__ b(eq, &done);
- __ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
+ __ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
__ b(eq, &done);
- __ ldr(feedback_map, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ b(ne, FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
@@ -2445,8 +2459,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ b(ne, &miss);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
- __ cmp(r1, r4);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
+ __ cmp(r1, r5);
__ b(ne, &megamorphic);
__ jmp(&done);
}
@@ -2455,14 +2469,14 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
+ __ CompareRoot(r5, Heap::kuninitialized_symbolRootIndex);
__ b(eq, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
- __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
+ __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
__ jmp(&done);
// An uninitialized cache is patched with the function
@@ -2470,22 +2484,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
if (!FLAG_pretenuring_call_new) {
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
- __ cmp(r1, r4);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
+ __ cmp(r1, r5);
__ b(ne, &not_array_function);
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ b(&done);
__ bind(&not_array_function);
}
CreateWeakCellStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ bind(&done);
}
@@ -2535,8 +2549,10 @@ static void EmitSlowCase(MacroAssembler* masm,
static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(r1, r3);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ push(r1);
+ __ mov(r0, r3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ pop(r1);
}
__ str(r0, MemOperand(sp, argc * kPointerSize));
@@ -2607,18 +2623,18 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
- // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // r3 : slot in feedback vector (Smi, for RecordCallTarget)
+ // r4 : original constructor (for IsSuperConstructorCall)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
- __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
if (FLAG_pretenuring_call_new) {
@@ -2642,9 +2658,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// Pass function as original constructor.
if (IsSuperConstructorCall()) {
- __ mov(r4, Operand(1 * kPointerSize));
- __ add(r4, r4, Operand(r0, LSL, kPointerSizeLog2));
- __ ldr(r3, MemOperand(sp, r4));
+ __ mov(r3, r4);
} else {
__ mov(r3, r1);
}
@@ -2658,10 +2672,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
- // r4: object type
+ // r5: object type
Label do_call;
__ bind(&slow);
- __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r5, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function_call);
__ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
@@ -2898,11 +2912,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r1, r2, r3);
// Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
-
- ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
- __ CallExternalReference(miss, 3);
+ Runtime::FunctionId id = GetICState() == DEFAULT
+ ? Runtime::kCallIC_Miss
+ : Runtime::kCallIC_Customization_Miss;
+ __ CallRuntime(id, 3);
// Move result to edi and exit the internal frame.
__ mov(r1, r0);
@@ -3014,10 +3027,9 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
- __ tst(code_,
- Operand(kSmiTagMask |
- ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
+ __ tst(code_, Operand(kSmiTagMask |
+ ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
__ b(ne, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
@@ -3294,7 +3306,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
__ bind(&single_char);
// r0: original string
@@ -3481,7 +3493,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -3762,7 +3774,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ bind(&miss);
@@ -3814,15 +3826,12 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
-
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
__ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op())));
__ push(ip);
- __ CallExternalReference(miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss, 3);
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -3883,7 +3892,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
- DCHECK_EQ(kSmiTagSize, 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
Register tmp = properties;
__ add(tmp, properties, Operand(index, LSL, 1));
__ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
@@ -3973,8 +3982,8 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
}
__ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
- // Scale the index by multiplying by the element size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ // Scale the index by multiplying by the entry size.
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
@@ -4058,10 +4067,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ and_(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
- DCHECK_EQ(kSmiTagSize, 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
__ add(index, dictionary, Operand(index, LSL, 2));
__ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
@@ -4528,7 +4537,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- false, receiver, name, feedback,
+ receiver, name, feedback,
receiver_map, scratch1, r9);
__ bind(&miss);
@@ -4667,8 +4676,9 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
- int code_size = masm->CallStubSize(&stub) + 2 * Assembler::kInstrSize;
- PredictableCodeSizeScope predictable(masm, code_size);
+ PredictableCodeSizeScope predictable(masm);
+ predictable.ExpectSize(masm->CallStubSize(&stub) +
+ 2 * Assembler::kInstrSize);
__ push(lr);
__ CallStub(&stub);
__ pop(lr);
@@ -4772,12 +4782,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// sp[0] - last argument
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
- DCHECK(FAST_SMI_ELEMENTS == 0);
- DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
- DCHECK(FAST_ELEMENTS == 2);
- DCHECK(FAST_HOLEY_ELEMENTS == 3);
- DCHECK(FAST_DOUBLE_ELEMENTS == 4);
- DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
__ tst(r3, Operand(1));
@@ -5051,6 +5061,158 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context = cp;
+ Register result = r0;
+ Register slot = r2;
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ ldr(result, ContextOperand(context, Context::PREVIOUS_INDEX));
+ context = result;
+ }
+
+ // Load the PropertyCell value at the specified slot.
+ __ add(result, context, Operand(slot, LSL, kPointerSizeLog2));
+ __ ldr(result, ContextOperand(result));
+ __ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
+
+ // If the result is not the_hole, return. Otherwise, handle in the runtime.
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ __ Ret(ne);
+
+ // Fallback to runtime.
+ __ SmiTag(slot);
+ __ push(slot);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+}
+
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register value = r0;
+ Register slot = r2;
+
+ Register cell = r1;
+ Register cell_details = r4;
+ Register cell_value = r5;
+ Register cell_value_map = r6;
+ Register scratch = r9;
+
+ Register context = cp;
+ Register context_temp = cell;
+
+ Label fast_heapobject_case, fast_smi_case, slow_case;
+
+ if (FLAG_debug_code) {
+ __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
+ __ Check(ne, kUnexpectedValue);
+ }
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); i++) {
+ __ ldr(context_temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ context = context_temp;
+ }
+
+ // Load the PropertyCell at the specified slot.
+ __ add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
+ __ ldr(cell, ContextOperand(cell));
+
+ // Load PropertyDetails for the cell (actually only the cell_type and kind).
+ __ ldr(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
+ __ SmiUntag(cell_details);
+ __ and_(cell_details, cell_details,
+ Operand(PropertyDetails::PropertyCellTypeField::kMask |
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask));
+
+ // Check if PropertyCell holds mutable data.
+ Label not_mutable_data;
+ __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kMutable) |
+ PropertyDetails::KindField::encode(kData)));
+ __ b(ne, &not_mutable_data);
+ __ JumpIfSmi(value, &fast_smi_case);
+
+ __ bind(&fast_heapobject_case);
+ __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ // RecordWriteField clobbers the value register, so we copy it before the
+ // call.
+ __ mov(r4, Operand(value));
+ __ RecordWriteField(cell, PropertyCell::kValueOffset, r4, scratch,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ bind(&not_mutable_data);
+ // Check if PropertyCell value matches the new value (relevant for Constant,
+ // ConstantType and Undefined cells).
+ Label not_same_value;
+ __ ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ __ cmp(cell_value, value);
+ __ b(ne, &not_same_value);
+
+ // Make sure the PropertyCell is not marked READ_ONLY.
+ __ tst(cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
+ __ b(ne, &slow_case);
+
+ if (FLAG_debug_code) {
+ Label done;
+ // This can only be true for Constant, ConstantType and Undefined cells,
+ // because we never store the_hole via this stub.
+ __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstant) |
+ PropertyDetails::KindField::encode(kData)));
+ __ b(eq, &done);
+ __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ b(eq, &done);
+ __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kUndefined) |
+ PropertyDetails::KindField::encode(kData)));
+ __ Check(eq, kUnexpectedValue);
+ __ bind(&done);
+ }
+ __ Ret();
+ __ bind(&not_same_value);
+
+ // Check if PropertyCell contains data with constant type (and is not
+ // READ_ONLY).
+ __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ b(ne, &slow_case);
+
+ // Now either both old and new values must be smis or both must be heap
+ // objects with same map.
+ Label value_is_heap_object;
+ __ JumpIfNotSmi(value, &value_is_heap_object);
+ __ JumpIfNotSmi(cell_value, &slow_case);
+ // Old and new values are smis, no need for a write barrier here.
+ __ bind(&fast_smi_case);
+ __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ __ Ret();
+
+ __ bind(&value_is_heap_object);
+ __ JumpIfSmi(cell_value, &slow_case);
+
+ __ ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
+ __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ cmp(cell_value_map, scratch);
+ __ b(eq, &fast_heapobject_case);
+
+ // Fallback to runtime.
+ __ bind(&slow_case);
+ __ SmiTag(slot);
+ __ Push(slot, value);
+ __ TailCallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2, 1);
+}
+
+
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index ddea33a34b..b2b2c08cd8 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -5,6 +5,8 @@
#ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_
+#include "src/arm/frames-arm.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index a456996a27..6a9f4677f6 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/arm/simulator-arm.h"
@@ -888,10 +886,9 @@ CodeAgingHelper::CodeAgingHelper() {
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
- SmartPointer<CodePatcher> patcher(
- new CodePatcher(young_sequence_.start(),
- young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ base::SmartPointer<CodePatcher> patcher(new CodePatcher(
+ young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushFixedFrame(r1);
patcher->masm()->nop(ip.code());
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 4c7c7688fd..d36ce59d66 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -12,9 +12,6 @@ namespace v8 {
namespace internal {
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc
index 0749356909..9fefc3140a 100644
--- a/deps/v8/src/arm/constants-arm.cc
+++ b/deps/v8/src/arm/constants-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 7b8529c4bb..6d544f3f36 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -5,6 +5,12 @@
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
+#include <stdint.h>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/globals.h"
+
// ARM EABI is required.
#if defined(__arm__) && !defined(__ARM_EABI__)
#error ARM EABI support is required.
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index dd2d13c686..f291ba92ca 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -12,8 +12,6 @@
#endif
#endif
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/assembler.h"
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
deleted file mode 100644
index 7d9313200b..0000000000
--- a/deps/v8/src/arm/debug-arm.cc
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/codegen.h"
-#include "src/debug.h"
-
-namespace v8 {
-namespace internal {
-
-void BreakLocation::SetDebugBreakAtReturn() {
- // Patch the code changing the return from JS function sequence from
- // mov sp, fp
- // ldmia sp!, {fp, lr}
- // add sp, sp, #4
- // bx lr
- // to a call to the debug break return code.
- // ldr ip, [pc, #0]
- // blx ip
- // <debug break return code entry point address>
- // bkpt 0
- CodePatcher patcher(pc(), Assembler::kJSReturnSequenceInstructions);
- patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
- patcher.masm()->blx(v8::internal::ip);
- patcher.Emit(
- debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry());
- patcher.masm()->bkpt(0);
-}
-
-
-void BreakLocation::SetDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- // Patch the code changing the debug break slot code from
- // mov r2, r2
- // mov r2, r2
- // mov r2, r2
- // to a call to the debug break slot code.
- // ldr ip, [pc, #0]
- // blx ip
- // <debug break slot code entry point address>
- CodePatcher patcher(pc(), Assembler::kDebugBreakSlotInstructions);
- patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
- patcher.masm()->blx(v8::internal::ip);
- patcher.Emit(
- debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry());
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs) {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
- for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
- __ push(ip);
- }
- __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
- __ push(ip);
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- DCHECK((object_regs & ~kJSCallerSaved) == 0);
- DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
- DCHECK((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ tst(reg, Operand(0xc0000000));
- __ Assert(eq, kUnableToEncodeValueAsSmi);
- }
- __ SmiTag(reg);
- }
- }
- __ stm(db_w, sp, object_regs | non_object_regs);
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ mov(r0, Operand::Zero()); // no arguments
- __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(masm->isolate(), 1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ ldm(ia_w, sp, object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ SmiUntag(reg);
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ mov(reg, Operand(kDebugZapValue));
- }
- }
- }
-
- // Don't bother removing padding bytes pushed on the stack
- // as the frame is going to be restored right away.
-
- // Leave the internal frame.
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ mov(ip, Operand(after_break_target));
- __ ldr(ip, MemOperand(ip));
- __ Jump(ip);
-}
-
-
-void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallICStub
- // ----------- S t a t e -------------
- // -- r1 : function
- // -- r3 : slot in feedback array (smi)
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r3.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // In places other than IC call sites it is expected that r0 is TOS which
- // is an object - this is not generally the case so this should be used with
- // care.
- Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-arm.cc).
- // ----------- S t a t e -------------
- // -- r1 : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-arm.cc)
- // ----------- S t a t e -------------
- // -- r0 : number of arguments (not smi)
- // -- r1 : constructor function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
-}
-
-
-void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
- MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-arm.cc)
- // ----------- S t a t e -------------
- // -- r0 : number of arguments (not smi)
- // -- r1 : constructor function
- // -- r2 : feedback array
- // -- r3 : feedback slot (smi)
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), r0.bit());
-}
-
-
-void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction. Avoid emitting
- // the constant pool in the debug break slot code.
- Assembler::BlockConstPoolScope block_const_pool(masm);
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
- __ nop(MacroAssembler::DEBUG_BREAK_NOP);
- }
- DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
- masm->InstructionsGeneratedSince(&check_codesize));
-}
-
-
-void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0);
-}
-
-
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ mov(ip, Operand(restarter_frame_function_slot));
- __ mov(r1, Operand::Zero());
- __ str(r1, MemOperand(ip, 0));
-
- // Load the function pointer off of our current stack frame.
- __ ldr(r1, MemOperand(fp,
- StandardFrameConstants::kConstantPoolOffset - kPointerSize));
-
- // Pop return address, frame and constant pool pointer (if
- // FLAG_enable_embedded_constant_pool).
- __ LeaveFrame(StackFrame::INTERNAL);
-
- { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- // Load context from the function.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Get function code.
- __ ldr(ip, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
- __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Re-run JSFunction, r1 is function, cp is context.
- __ Jump(ip);
- }
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index a9bcea9726..312bb00df3 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/safepoint-table.h"
namespace v8 {
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 1c1516d168..0cc24e00af 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -28,8 +28,6 @@
#include <stdio.h>
#include <string.h>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index 3f3c4f04c2..2004ff6ae9 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/assembler.h"
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index db6a9e52e0..dcba34f017 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -128,12 +128,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
} } // namespace v8::internal
#endif // V8_ARM_FRAMES_ARM_H_
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 67f65f5cb3..f26b62ccaa 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/interface-descriptors.h"
@@ -36,7 +34,11 @@ const Register VectorStoreICDescriptor::VectorRegister() { return r3; }
const Register StoreTransitionDescriptor::MapRegister() { return r3; }
-const Register ElementTransitionAndStoreDescriptor::MapRegister() { return r3; }
+const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r2; }
+
+
+const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r2; }
+const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
const Register InstanceofDescriptor::left() { return r0; }
@@ -62,6 +64,14 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
+void StoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
@@ -83,6 +93,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
}
+// static
+const Register ToObjectDescriptor::ReceiverRegister() { return r0; }
+
+
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
@@ -158,11 +172,11 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
- // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // r3 : slot in feedback vector (Smi, for RecordCallTarget)
+ // r4 : original constructor (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {r0, r1, r2};
+ Register registers[] = {r0, r1, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -353,11 +367,22 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
+void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r1, // math rounding function
+ r3, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void MathRoundVariantCallFromOptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r1, // math rounding function
r3, // vector slot id
+ r4, // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 55e501762c..1c04ba7ee7 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -4,8 +4,6 @@
#include <sstream>
-#include "src/v8.h"
-
#include "src/arm/lithium-codegen-arm.h"
#include "src/hydrogen-osr.h"
#include "src/lithium-inl.h"
@@ -330,6 +328,11 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
+void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d", depth(), slot_index());
+}
+
+
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -348,6 +351,12 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
+ value()->PrintTo(stream);
+}
+
+
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -1661,8 +1670,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsExternal()) {
- DCHECK(instr->left()->representation().IsExternal());
- DCHECK(instr->right()->representation().IsInteger32());
+ DCHECK(instr->IsConsistentExternalRepresentation());
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
@@ -2150,6 +2158,15 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
+ HLoadGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ DCHECK(instr->slot_index() > 0);
+ LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2218,7 +2235,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LInstruction* result = NULL;
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseRegister(instr->elements());
@@ -2238,10 +2255,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
bool needs_environment;
- if (instr->is_external() || instr->is_fixed_typed_array()) {
+ if (instr->is_fixed_typed_array()) {
// see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
- elements_kind == UINT32_ELEMENTS) &&
+ needs_environment = elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32);
} else {
// see LCodeGen::DoLoadKeyedFixedDoubleArray and
@@ -2276,7 +2292,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
@@ -2308,10 +2324,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
- DCHECK((instr->is_fixed_typed_array() &&
- instr->elements()->representation().IsTagged()) ||
- (instr->is_external() &&
- instr->elements()->representation().IsExternal()));
+ DCHECK(instr->elements()->representation().IsExternal());
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
@@ -2437,6 +2450,19 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
+ HStoreGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* value = UseFixed(instr->value(),
+ StoreGlobalViaContextDescriptor::ValueRegister());
+ DCHECK(instr->slot_index() > 0);
+
+ LStoreGlobalViaContext* result =
+ new (zone()) LStoreGlobalViaContext(context, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index d61c3d4c0d..eea9ece5ae 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -102,6 +102,7 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
+ V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -143,6 +144,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
+ V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1645,15 +1647,9 @@ class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- bool is_external() const {
- return hydrogen()->is_external();
- }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1700,7 +1696,23 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
+ TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ LOperand* context() { return inputs_[0]; }
+
+ int depth() const { return hydrogen()->depth(); }
+ int slot_index() const { return hydrogen()->slot_index(); }
};
@@ -2205,6 +2217,28 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
+class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreGlobalViaContext(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
+ "store-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int depth() { return hydrogen()->depth(); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
@@ -2213,13 +2247,9 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
inputs_[2] = value;
}
- bool is_external() const { return hydrogen()->is_external(); }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 747730b3f5..606721f2da 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"
#include "src/base/bits.h"
@@ -106,7 +104,7 @@ bool LCodeGen::GeneratePrologue() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop_at");
}
#endif
@@ -427,6 +425,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
+ AllowDeferredHandleDereference get_number;
DCHECK(literal->IsNumber());
__ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
} else if (r.IsDouble()) {
@@ -648,15 +647,23 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
if (op->IsStackSlot()) {
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
if (is_tagged) {
- translation->StoreStackSlot(op->index());
+ translation->StoreStackSlot(index);
} else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
+ translation->StoreUint32StackSlot(index);
} else {
- translation->StoreInt32StackSlot(op->index());
+ translation->StoreInt32StackSlot(index);
}
} else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
+ translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -2267,6 +2274,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ b(eq, instr->TrueLabel(chunk_));
}
+ if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ // SIMD value -> true.
+ __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
+ __ b(eq, instr->TrueLabel(chunk_));
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
DwVfpRegister dbl_scratch = double_scratch0();
@@ -2969,13 +2982,31 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
- PREMONOMORPHIC).code();
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+ SLOPPY, PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->result()).is(r0));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub =
+ CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3068,7 +3099,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3162,17 +3193,13 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
? (element_size_shift - kSmiTagSize) : element_size_shift;
int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
__ vldr(double_scratch0().low(), scratch0(), base_offset);
__ vcvt_f64_f32(result, double_scratch0().low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
@@ -3184,29 +3211,22 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
key, external_pointer, key_is_constant, constant_key,
element_size_shift, shift_size, base_offset);
switch (elements_kind) {
- case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
__ ldrsb(result, mem_operand);
break;
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ ldrb(result, mem_operand);
break;
- case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
__ ldrsh(result, mem_operand);
break;
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
__ ldrh(result, mem_operand);
break;
- case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
__ ldr(result, mem_operand);
break;
- case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
@@ -3216,8 +3236,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
@@ -3327,7 +3345,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3570,12 +3588,11 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- __ push(cp); // The context is the first argument.
__ Move(scratch0(), instr->hydrogen()->pairs());
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}
@@ -4220,6 +4237,30 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->value())
+ .is(StoreGlobalViaContextDescriptor::ValueRegister()));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
+ isolate(), depth, instr->language_mode())
+ .code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ push(StoreGlobalViaContextDescriptor::ValueRegister());
+ __ CallRuntime(is_strict(instr->language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
if (instr->index()->IsConstantOperand()) {
@@ -4262,10 +4303,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
? (element_size_shift - kSmiTagSize) : element_size_shift;
int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
Register address = scratch0();
DwVfpRegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
@@ -4278,8 +4316,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
} else {
__ add(address, external_pointer, Operand(key, LSL, shift_size));
}
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
__ vstr(double_scratch0().low(), address, base_offset);
} else { // Storing doubles, not floats.
@@ -4292,30 +4329,21 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
element_size_shift, shift_size,
base_offset);
switch (elements_kind) {
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_INT8_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case INT8_ELEMENTS:
__ strb(value, mem_operand);
break;
- case EXTERNAL_INT16_ELEMENTS:
- case EXTERNAL_UINT16_ELEMENTS:
case INT16_ELEMENTS:
case UINT16_ELEMENTS:
__ strh(value, mem_operand);
break;
- case EXTERNAL_INT32_ELEMENTS:
- case EXTERNAL_UINT32_ELEMENTS:
case INT32_ELEMENTS:
case UINT32_ELEMENTS:
__ str(value, mem_operand);
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -4421,7 +4449,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases: external, fast double
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -5630,10 +5658,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
- __ b(ge, false_label);
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- final_branch_condition = eq;
+ final_branch_condition = lt;
} else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label);
@@ -5680,6 +5705,17 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(type_name, factory->type##_string())) { \
+ __ JumpIfSmi(input, false_label); \
+ __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
+ __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
+ final_branch_condition = eq;
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+
} else {
__ b(false_label);
}
@@ -5800,8 +5836,8 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(hs, &done);
Handle<Code> stack_check = isolate()->builtins()->StackCheck();
- PredictableCodeSizeScope predictable(masm(),
- CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
+ PredictableCodeSizeScope predictable(masm());
+ predictable.ExpectSize(CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
DCHECK(instr->context()->IsRegister());
DCHECK(ToRegister(instr->context()).is(cp));
CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
index f8e4a7f680..31feb11edc 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.h b/deps/v8/src/arm/lithium-gap-resolver-arm.h
index 55206d3e60..88f1a7bb67 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.h
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.h
@@ -5,8 +5,6 @@
#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-#include "src/v8.h"
-
#include "src/lithium.h"
namespace v8 {
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 61e484bd85..4034fa95a4 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -4,8 +4,6 @@
#include <limits.h> // For LONG_MIN, LONG_MAX.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/base/bits.h"
@@ -13,7 +11,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -1434,10 +1432,11 @@ void MacroAssembler::IsObjectNameType(Register object,
void MacroAssembler::DebugBreak() {
mov(r0, Operand::Zero());
- mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+ mov(r1,
+ Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
CEntryStub ces(isolate(), 1);
DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
@@ -1875,26 +1874,6 @@ void MacroAssembler::Allocate(Register object_size,
}
-void MacroAssembler::UndoAllocationInNewSpace(Register object,
- Register scratch) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- and_(object, object, Operand(~kHeapObjectTagMask));
-#ifdef DEBUG
- // Check that the object un-allocated is below the current top.
- mov(scratch, Operand(new_space_allocation_top));
- ldr(scratch, MemOperand(scratch));
- cmp(object, scratch);
- Check(lt, kUndoAllocationOfNonAllocatedMemory);
-#endif
- // Write the address of the object to un-allocate as the current top.
- mov(scratch, Operand(new_space_allocation_top));
- str(object, MemOperand(scratch));
-}
-
-
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -3809,23 +3788,35 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register scratch1,
Label* found) {
DCHECK(!scratch1.is(scratch0));
- Factory* factory = isolate()->factory();
Register current = scratch0;
- Label loop_again;
+ Label loop_again, end;
// scratch contained elements pointer.
mov(current, object);
+ ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ CompareRoot(current, Heap::kNullValueRootIndex);
+ b(eq, &end);
// Loop based on the map going up the prototype chain.
bind(&loop_again);
ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+
+ STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ ldrb(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
+ cmp(scratch1, Operand(JS_OBJECT_TYPE));
+ b(lo, found);
+
ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
b(eq, found);
ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
- cmp(current, Operand(factory->null_value()));
+ CompareRoot(current, Heap::kNullValueRootIndex);
b(ne, &loop_again);
+
+ bind(&end);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 7ece4b2fa6..5ec2bd3f8b 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -13,6 +13,19 @@
namespace v8 {
namespace internal {
+// Give alias names to registers for calling conventions.
+const Register kReturnRegister0 = {kRegister_r0_Code};
+const Register kReturnRegister1 = {kRegister_r1_Code};
+const Register kJSFunctionRegister = {kRegister_r1_Code};
+const Register kContextRegister = {kRegister_r7_Code};
+const Register kInterpreterAccumulatorRegister = {kRegister_r0_Code};
+const Register kInterpreterRegisterFileRegister = {kRegister_r4_Code};
+const Register kInterpreterBytecodeOffsetRegister = {kRegister_r5_Code};
+const Register kInterpreterBytecodeArrayRegister = {kRegister_r6_Code};
+const Register kInterpreterDispatchTableRegister = {kRegister_r8_Code};
+const Register kRuntimeCallFunctionRegister = {kRegister_r1_Code};
+const Register kRuntimeCallArgCountRegister = {kRegister_r0_Code};
+
// ----------------------------------------------------------------------------
// Static helper functions
@@ -250,7 +263,7 @@ class MacroAssembler: public Assembler {
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
Register object,
int offset,
@@ -325,9 +338,7 @@ class MacroAssembler: public Assembler {
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Condition cond = al) {
- DCHECK(!src1.is(src2));
- DCHECK(!src2.is(src3));
- DCHECK(!src1.is(src3));
+ DCHECK(!AreAliased(src1, src2, src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@@ -347,12 +358,7 @@ class MacroAssembler: public Assembler {
Register src3,
Register src4,
Condition cond = al) {
- DCHECK(!src1.is(src2));
- DCHECK(!src2.is(src3));
- DCHECK(!src1.is(src3));
- DCHECK(!src1.is(src4));
- DCHECK(!src2.is(src4));
- DCHECK(!src3.is(src4));
+ DCHECK(!AreAliased(src1, src2, src3, src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@@ -374,6 +380,36 @@ class MacroAssembler: public Assembler {
}
}
+ // Push five registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4,
+ Register src5, Condition cond = al) {
+ DCHECK(!AreAliased(src1, src2, src3, src4, src5));
+ if (src1.code() > src2.code()) {
+ if (src2.code() > src3.code()) {
+ if (src3.code() > src4.code()) {
+ if (src4.code() > src5.code()) {
+ stm(db_w, sp,
+ src1.bit() | src2.bit() | src3.bit() | src4.bit() | src5.bit(),
+ cond);
+ } else {
+ stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
+ cond);
+ str(src5, MemOperand(sp, 4, NegPreIndex), cond);
+ }
+ } else {
+ stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+ Push(src4, src5, cond);
+ }
+ } else {
+ stm(db_w, sp, src1.bit() | src2.bit(), cond);
+ Push(src3, src4, src5, cond);
+ }
+ } else {
+ str(src1, MemOperand(sp, 4, NegPreIndex), cond);
+ Push(src2, src3, src4, src5, cond);
+ }
+ }
+
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Condition cond = al) {
DCHECK(!src1.is(src2));
@@ -387,9 +423,7 @@ class MacroAssembler: public Assembler {
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
- DCHECK(!src1.is(src2));
- DCHECK(!src2.is(src3));
- DCHECK(!src1.is(src3));
+ DCHECK(!AreAliased(src1, src2, src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@@ -409,12 +443,7 @@ class MacroAssembler: public Assembler {
Register src3,
Register src4,
Condition cond = al) {
- DCHECK(!src1.is(src2));
- DCHECK(!src2.is(src3));
- DCHECK(!src1.is(src3));
- DCHECK(!src1.is(src4));
- DCHECK(!src2.is(src4));
- DCHECK(!src3.is(src4));
+ DCHECK(!AreAliased(src1, src2, src3, src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@@ -745,13 +774,6 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. The caller must make sure that no pointers
- // are left to the object(s) no longer allocated as they would be invalid when
- // allocation is undone.
- void UndoAllocationInNewSpace(Register object, Register scratch);
-
-
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -1513,7 +1535,7 @@ class CodePatcher {
CodePatcher(byte* address,
int instructions,
FlushICache flush_cache = FLUSH);
- virtual ~CodePatcher();
+ ~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
@@ -1539,7 +1561,7 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
-inline MemOperand ContextOperand(Register context, int index) {
+inline MemOperand ContextOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 3a02ee0094..5da6204050 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -6,8 +6,6 @@
#include <stdlib.h>
#include <cmath>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"
@@ -1229,9 +1227,15 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
// Returns the limit of the stack area to enable checking for stack overflows.
-uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
- // pushing values.
+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
+ // The simulator uses a separate JS stack. If we have exhausted the C stack,
+ // we also drop down the JS limit to reflect the exhaustion on the JS stack.
+ if (GetCurrentStackPosition() < c_limit) {
+ return reinterpret_cast<uintptr_t>(get_sp());
+ }
+
+ // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
+ // to prevent overrunning the stack when pushing values.
return reinterpret_cast<uintptr_t>(stack_) + 1024;
}
@@ -4011,6 +4015,9 @@ void Simulator::Execute() {
void Simulator::CallInternal(byte* entry) {
+ // Adjust JS-based stack limit to C-based stack limit.
+ isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
// Prepare to execute the code at entry
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index eea43efc53..a972a77d41 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -181,12 +181,12 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
- Address get_sp() {
+ Address get_sp() const {
return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
}
// Accessor to the internal simulator stack area.
- uintptr_t StackLimit() const;
+ uintptr_t StackLimit(uintptr_t c_limit) const;
// Executes ARM instructions until the PC reaches end_sim_pc.
void Execute();
@@ -439,15 +439,14 @@ class Simulator {
// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. Setting the c_limit to indicate a very small
-// stack cause stack overflow errors, since the simulator ignores the input.
-// This is unlikely to be an issue in practice, though it might cause testing
-// trouble down the line.
+// the C-based native code. The JS-based limit normally points near the end of
+// the simulator stack. When the C-based limit is exhausted we reflect that by
+// lowering the JS-based limit as well, to make stack checks trigger.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit();
+ return Simulator::current(isolate)->StackLimit(c_limit);
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index bbd44c5f10..3fbb09147b 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -7,7 +7,7 @@
#include "src/arm64/assembler-arm64.h"
#include "src/assembler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
namespace v8 {
@@ -17,7 +17,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
-void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+void RelocInfo::apply(intptr_t delta) {
// On arm64 only internal references need extra work.
DCHECK(RelocInfo::IsInternalReference(rmode_));
@@ -611,11 +611,6 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
-Address Assembler::break_address_from_return_address(Address pc) {
- return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
-}
-
-
Address Assembler::return_address_from_call_start(Address pc) {
// The call, generated by MacroAssembler::Call, is one of two possible
// sequences:
@@ -825,18 +820,18 @@ void RelocInfo::set_code_age_stub(Code* stub,
}
-Address RelocInfo::call_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+Address RelocInfo::debug_call_address() {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
// For the above sequences the Relocinfo points to the load literal loading
// the call address.
+ STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
return Assembler::target_address_at(pc_, host_);
}
-void RelocInfo::set_call_address(Address target) {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+void RelocInfo::set_debug_call_address(Address target) {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -862,7 +857,7 @@ bool RelocInfo::IsPatchedReturnSequence() {
// The sequence must be:
// ldr ip0, [pc, #offset]
// blr ip0
- // See arm64/debug-arm64.cc BreakLocation::SetDebugBreakAtReturn().
+ // See arm64/debug-arm64.cc DebugCodegen::PatchDebugBreakSlot
Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
Instruction* i2 = i1->following();
return i1->IsLdrLiteralX() && (i1->Rt() == kIp0Code) &&
@@ -888,11 +883,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(this);
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- isolate->debug()->has_break_points()) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
@@ -913,11 +905,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
@@ -973,32 +962,6 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
}
-LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
- const CPURegister& rt, const CPURegister& rt2) {
- DCHECK(AreSameSizeAndType(rt, rt2));
- USE(rt2);
- if (rt.IsRegister()) {
- return rt.Is64Bits() ? LDNP_x : LDNP_w;
- } else {
- DCHECK(rt.IsFPRegister());
- return rt.Is64Bits() ? LDNP_d : LDNP_s;
- }
-}
-
-
-LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
- const CPURegister& rt, const CPURegister& rt2) {
- DCHECK(AreSameSizeAndType(rt, rt2));
- USE(rt2);
- if (rt.IsRegister()) {
- return rt.Is64Bits() ? STNP_x : STNP_w;
- } else {
- DCHECK(rt.IsFPRegister());
- return rt.Is64Bits() ? STNP_d : STNP_s;
- }
-}
-
-
LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index f27d3b97b0..235b5ee2bc 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -26,13 +26,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#define ARM64_DEFINE_REG_STATICS
#include "src/arm64/assembler-arm64-inl.h"
+#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
@@ -1628,37 +1627,6 @@ void Assembler::LoadStorePair(const CPURegister& rt,
}
-void Assembler::ldnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src) {
- LoadStorePairNonTemporal(rt, rt2, src,
- LoadPairNonTemporalOpFor(rt, rt2));
-}
-
-
-void Assembler::stnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst) {
- LoadStorePairNonTemporal(rt, rt2, dst,
- StorePairNonTemporalOpFor(rt, rt2));
-}
-
-
-void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& addr,
- LoadStorePairNonTemporalOp op) {
- DCHECK(!rt.Is(rt2));
- DCHECK(AreSameSizeAndType(rt, rt2));
- DCHECK(addr.IsImmediateOffset());
- LSDataSize size = CalcLSPairDataSize(
- static_cast<LoadStorePairOp>(op & LoadStorePairMask));
- DCHECK(IsImmLSPair(addr.offset(), size));
- int offset = static_cast<int>(addr.offset());
- Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size));
-}
-
-
// Memory instructions.
void Assembler::ldrb(const Register& rt, const MemOperand& src) {
LoadStore(rt, src, LDRB_w);
@@ -2902,21 +2870,18 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
- if (((rmode >= RelocInfo::JS_RETURN) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
+ if (((rmode >= RelocInfo::COMMENT) &&
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
- (rmode == RelocInfo::CONST_POOL) ||
- (rmode == RelocInfo::VENEER_POOL) ||
- (rmode == RelocInfo::DEOPT_REASON)) {
+ (rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
+ (rmode == RelocInfo::DEOPT_REASON) ||
+ (rmode == RelocInfo::GENERATOR_CONTINUATION)) {
// Adjust code for new modes.
- DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsDeoptReason(rmode)
- || RelocInfo::IsPosition(rmode)
- || RelocInfo::IsInternalReference(rmode)
- || RelocInfo::IsConstPool(rmode)
- || RelocInfo::IsVeneerPool(rmode));
+ DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) ||
+ RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsPosition(rmode) ||
+ RelocInfo::IsInternalReference(rmode) ||
+ RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode) ||
+ RelocInfo::IsGeneratorContinuation(rmode));
// These modes do not need an entry in the constant pool.
} else {
constpool_.RecordEntry(data, rmode);
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 5fab081d4b..a7e5a06640 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -893,9 +893,6 @@ class Assembler : public AssemblerBase {
// instruction stream that call will return from.
inline static Address return_address_from_call_start(Address pc);
- // Return the code target address of the patch debug break slot
- inline static Address break_address_from_return_address(Address pc);
-
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -955,25 +952,13 @@ class Assembler : public AssemblerBase {
return SizeOfCodeGeneratedSince(label) / kInstructionSize;
}
- // Number of instructions generated for the return sequence in
- // FullCodeGenerator::EmitReturnSequence.
- static const int kJSReturnSequenceInstructions = 7;
- static const int kJSReturnSequenceLength =
- kJSReturnSequenceInstructions * kInstructionSize;
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = 0;
static const int kPatchDebugBreakSlotAddressOffset = 0;
// Number of instructions necessary to be able to later patch it to a call.
- // See DebugCodegen::GenerateSlot() and
- // BreakLocation::SetDebugBreakAtSlot().
- static const int kDebugBreakSlotInstructions = 4;
+ static const int kDebugBreakSlotInstructions = 5;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstructionSize;
- static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
-
// Prevent contant pool emission until EndBlockConstPool is called.
// Call to this function can be nested but must be followed by an equal
// number of call to EndBlockConstpool.
@@ -1022,11 +1007,11 @@ class Assembler : public AssemblerBase {
int buffer_space() const;
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
+ // Mark generator continuation.
+ void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot();
+ void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
// Record the emission of a constant pool.
//
@@ -1507,14 +1492,6 @@ class Assembler : public AssemblerBase {
// Load word pair with sign extension.
void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
- // Load integer or FP register pair, non-temporal.
- void ldnp(const CPURegister& rt, const CPURegister& rt2,
- const MemOperand& src);
-
- // Store integer or FP register pair, non-temporal.
- void stnp(const CPURegister& rt, const CPURegister& rt2,
- const MemOperand& dst);
-
// Load literal to register from a pc relative address.
void ldr_pcrel(const CPURegister& rt, int imm19);
@@ -2022,10 +1999,6 @@ class Assembler : public AssemblerBase {
static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
const CPURegister& rt2);
- static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
- const CPURegister& rt, const CPURegister& rt2);
- static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
- const CPURegister& rt, const CPURegister& rt2);
static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
// Remove the specified branch from the unbound label link chain.
@@ -2051,10 +2024,6 @@ class Assembler : public AssemblerBase {
const Operand& operand,
FlagsUpdate S,
Instr op);
- void LoadStorePairNonTemporal(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& addr,
- LoadStorePairNonTemporalOp op);
void ConditionalSelect(const Register& rd,
const Register& rn,
const Register& rm,
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index 45ac1a063b..19a83646f9 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
+#include "src/arm64/frames-arm64.h"
#include "src/codegen.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -302,36 +301,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
-static void Generate_Runtime_NewObject(MacroAssembler* masm,
- bool create_memento,
- Register original_constructor,
- Label* count_incremented,
- Label* allocated) {
- if (create_memento) {
- // Get the cell or allocation site.
- __ Peek(x4, 2 * kXRegSize);
- __ Push(x4);
- __ Push(x1); // Argument for Runtime_NewObject.
- __ Push(original_constructor);
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- __ Mov(x4, x0);
- // If we ended up using the runtime, and we want a memento, then the
- // runtime call made it for us, and we shouldn't do create count
- // increment.
- __ jmp(count_incremented);
- } else {
- __ Push(x1); // Argument for Runtime_NewObject.
- __ Push(original_constructor);
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Mov(x4, x0);
- __ jmp(allocated);
- }
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@@ -352,44 +323,35 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
- // Preserve the three incoming parameters on the stack.
- if (create_memento) {
- __ AssertUndefinedOrAllocationSite(x2, x10);
- __ Push(x2);
- }
-
+ // Preserve the four incoming parameters on the stack.
Register argc = x0;
Register constructor = x1;
+ Register allocation_site = x2;
Register original_constructor = x3;
// Preserve the incoming parameters on the stack.
+ __ AssertUndefinedOrAllocationSite(allocation_site, x10);
__ SmiTag(argc);
- if (use_new_target) {
- __ Push(argc, constructor, original_constructor);
- } else {
- __ Push(argc, constructor);
- }
- // sp[0]: new.target (if used)
- // sp[0/1]: Constructor function.
- // sp[1/2]: number of arguments (smi-tagged)
-
- Label rt_call, count_incremented, allocated, normal_new;
- __ Cmp(constructor, original_constructor);
- __ B(eq, &normal_new);
- Generate_Runtime_NewObject(masm, create_memento, original_constructor,
- &count_incremented, &allocated);
-
- __ Bind(&normal_new);
+ __ Push(allocation_site, argc, constructor, original_constructor);
+ // sp[0]: new.target
+ // sp[1]: Constructor function.
+ // sp[2]: number of arguments (smi-tagged)
+ // sp[3]: allocation site
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
if (FLAG_inline_new) {
- Label undo_allocation;
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate);
__ Mov(x2, Operand(debug_step_in_fp));
__ Ldr(x2, MemOperand(x2));
__ Cbnz(x2, &rt_call);
+
+ // Fall back to runtime if the original constructor and function differ.
+ __ Cmp(constructor, original_constructor);
+ __ B(ne, &rt_call);
+
// Load the initial map and verify that it is in fact a map.
Register init_map = x2;
__ Ldr(init_map,
@@ -430,15 +392,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Now allocate the JSObject on the heap.
+ Label rt_call_reload_new_target;
Register obj_size = x3;
Register new_obj = x4;
__ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
if (create_memento) {
__ Add(x7, obj_size,
Operand(AllocationMemento::kSize / kPointerSize));
- __ Allocate(x7, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
+ __ Allocate(x7, new_obj, x10, x11, &rt_call_reload_new_target,
+ SIZE_IN_WORDS);
} else {
- __ Allocate(obj_size, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
+ __ Allocate(obj_size, new_obj, x10, x11, &rt_call_reload_new_target,
+ SIZE_IN_WORDS);
}
// Allocated the JSObject, now initialize the fields. Map is set to
@@ -460,15 +425,21 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Obtain number of pre-allocated property fields and in-object
// properties.
- Register prealloc_fields = x10;
+ Register unused_props = x10;
Register inobject_props = x11;
- Register inst_sizes = x11;
- __ Ldr(inst_sizes, FieldMemOperand(init_map, Map::kInstanceSizesOffset));
- __ Ubfx(prealloc_fields, inst_sizes,
- Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ Ubfx(inobject_props, inst_sizes,
- Map::kInObjectPropertiesByte * kBitsPerByte, kBitsPerByte);
+ Register inst_sizes_or_attrs = x11;
+ Register prealloc_fields = x10;
+ __ Ldr(inst_sizes_or_attrs,
+ FieldMemOperand(init_map, Map::kInstanceAttributesOffset));
+ __ Ubfx(unused_props, inst_sizes_or_attrs,
+ Map::kUnusedPropertyFieldsByte * kBitsPerByte, kBitsPerByte);
+ __ Ldr(inst_sizes_or_attrs,
+ FieldMemOperand(init_map, Map::kInstanceSizesOffset));
+ __ Ubfx(
+ inobject_props, inst_sizes_or_attrs,
+ Map::kInObjectPropertiesOrConstructorFunctionIndexByte * kBitsPerByte,
+ kBitsPerByte);
+ __ Sub(prealloc_fields, inobject_props, unused_props);
// Calculate number of property fields in the object.
Register prop_fields = x6;
@@ -511,7 +482,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
__ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
// Load the AllocationSite
- __ Peek(x14, 2 * kXRegSize);
+ __ Peek(x14, 3 * kXRegSize);
+ __ AssertUndefinedOrAllocationSite(x14, x10);
DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
first_prop = NoReg;
@@ -523,72 +495,44 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
+ // and jump into the continuation code at any time from now on.
__ Add(new_obj, new_obj, kHeapObjectTag);
- // Check if a non-empty properties array is needed. Continue with
- // allocated object if not; allocate and initialize a FixedArray if yes.
- Register element_count = x3;
- __ Ldrb(element_count,
- FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields
- // and in-object properties.
- __ Add(element_count, element_count, prealloc_fields);
- __ Subs(element_count, element_count, inobject_props);
-
- // Done if no extra properties are to be allocated.
- __ B(eq, &allocated);
- __ Assert(pl, kPropertyAllocationCountFailed);
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- Register new_array = x5;
- Register array_size = x6;
- __ Add(array_size, element_count, FixedArray::kHeaderSize / kPointerSize);
- __ Allocate(array_size, new_array, x11, x12, &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP |
- SIZE_IN_WORDS));
-
- Register array_map = x10;
- __ LoadRoot(array_map, Heap::kFixedArrayMapRootIndex);
- __ Str(array_map, MemOperand(new_array, FixedArray::kMapOffset));
- __ SmiTag(x0, element_count);
- __ Str(x0, MemOperand(new_array, FixedArray::kLengthOffset));
-
- // Initialize the fields to undefined.
- Register elements = x10;
- __ Add(elements, new_array, FixedArray::kHeaderSize);
- __ FillFields(elements, element_count, filler);
-
- // Store the initialized FixedArray into the properties field of the
- // JSObject.
- __ Add(new_array, new_array, kHeapObjectTag);
- __ Str(new_array, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
-
// Continue with JSObject being successfully allocated.
__ B(&allocated);
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- __ Bind(&undo_allocation);
- __ UndoAllocationInNewSpace(new_obj, x14);
+ // Reload the original constructor and fall-through.
+ __ Bind(&rt_call_reload_new_target);
+ __ Peek(x3, 0 * kXRegSize);
}
// Allocate the new receiver object using the runtime call.
+ // x1: constructor function
+ // x3: original constructor
__ Bind(&rt_call);
- Generate_Runtime_NewObject(masm, create_memento, constructor,
- &count_incremented, &allocated);
+ Label count_incremented;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ Peek(x4, 3 * kXRegSize);
+ __ Push(x4, constructor, original_constructor); // arguments 1-3
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ __ Mov(x4, x0);
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't do create count
+ // increment.
+ __ B(&count_incremented);
+ } else {
+ __ Push(constructor, original_constructor); // arguments 1-2
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ Mov(x4, x0);
+ }
// Receiver for constructor call allocated.
// x4: JSObject
__ Bind(&allocated);
if (create_memento) {
- int offset = (use_new_target ? 3 : 2) * kXRegSize;
- __ Peek(x10, offset);
+ __ Peek(x10, 3 * kXRegSize);
__ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &count_incremented);
// r2 is an AllocationSite. We are creating a memento from it, so we
// need to increment the memento create count.
@@ -601,9 +545,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore the parameters.
- if (use_new_target) {
- __ Pop(original_constructor);
- }
+ __ Pop(original_constructor);
__ Pop(constructor);
// Reload the number of arguments from the stack.
@@ -612,11 +554,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Peek(argc, 0); // Load number of arguments.
__ SmiUntag(argc);
- if (use_new_target) {
- __ Push(original_constructor, x4, x4);
- } else {
- __ Push(x4, x4);
- }
+ __ Push(original_constructor, x4, x4);
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@@ -628,8 +566,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x2: address of last argument (caller sp)
// jssp[0]: receiver
// jssp[1]: receiver
- // jssp[2]: new.target (if used)
- // jssp[2/3]: number of arguments (smi-tagged)
+ // jssp[2]: new.target
+ // jssp[3]: number of arguments (smi-tagged)
// Compute the start address of the copy in x3.
__ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
Label loop, entry, done_copying_arguments;
@@ -660,17 +598,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- // TODO(arv): Remove the "!use_new_target" before supporting optimization
- // of functions that reference new.target
- if (!is_api_function && !use_new_target) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore the context from the frame.
// x0: result
// jssp[0]: receiver
- // jssp[1]: new.target (if used)
- // jssp[1/2]: number of arguments (smi-tagged)
+ // jssp[1]: new.target
+ // jssp[2]: number of arguments (smi-tagged)
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@@ -698,10 +634,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Bind(&exit);
// x0: result
// jssp[0]: receiver (newly allocated object)
- // jssp[1]: new.target (if used)
- // jssp[1/2]: number of arguments (smi-tagged)
- int offset = (use_new_target ? 2 : 1) * kXRegSize;
- __ Peek(x1, offset);
+ // jssp[1]: new.target (original constructor)
+ // jssp[2]: number of arguments (smi-tagged)
+ __ Peek(x1, 2 * kXRegSize);
// Leave construct frame.
}
@@ -714,17 +649,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, true, false);
}
@@ -739,18 +669,18 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// -----------------------------------
ASM_LOCATION("Builtins::Generate_JSConstructStubForDerived");
- // TODO(dslomov): support pretenuring
- CHECK(!FLAG_pretenuring_call_new);
-
{
FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+
+ __ AssertUndefinedOrAllocationSite(x2, x10);
__ Mov(x4, x0);
__ SmiTag(x4);
__ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Push(x4, x3, x10);
- // sp[0]: number of arguments
+ __ Push(x2, x4, x3, x10);
+ // sp[0]: receiver (the hole)
// sp[1]: new.target
- // sp[2]: receiver (the hole)
+ // sp[2]: number of arguments
+ // sp[3]: allocation site
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@@ -964,6 +894,144 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// - x1: the JS function object being called.
+// - cp: our context.
+// - fp: our caller's frame pointer.
+// - jssp: stack pointer.
+// - lr: return address.
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-arm64.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ Push(lr, fp, cp, x1);
+ __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ // Get the bytecode array from the function object and load the pointer to the
+ // first entry into kInterpreterBytecodeRegister.
+ __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
+ kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ Ldr(w11, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ DCHECK(jssp.Is(__ StackPointer()));
+ __ Sub(x10, jssp, Operand(x11));
+ __ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
+ __ B(hs, &ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ Bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ // Note: there should always be at least one stack slot for the return
+ // register in the register file.
+ Label loop_header;
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+ // TODO(rmcilroy): Ensure we always have an even number of registers to
+ // allow stack to be 16 bit aligned (and remove need for jssp).
+ __ Lsr(x11, x11, kPointerSizeLog2);
+ __ PushMultipleTimes(x10, x11);
+ __ Bind(&loop_header);
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ Bind(&ok);
+ }
+
+ // Load accumulator, register file, bytecode offset, dispatch table into
+ // registers.
+ __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ Sub(kInterpreterRegisterFileRegister, fp,
+ Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ Add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Dispatch to the first bytecode handler for the function.
+ __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
+ __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
+ // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
+ // and header removal.
+ __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(ip0);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // The return value is in accumulator, which is already in x0.
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Drop receiver + arguments.
+ // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Drop(1, kXRegSize);
+ __ Ret();
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -1291,8 +1359,10 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(argc);
- __ Push(argc, receiver);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Push(argc);
+ __ Mov(x0, receiver);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Mov(receiver, x0);
__ Pop(argc);
@@ -1400,6 +1470,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int vectorOffset,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
@@ -1417,12 +1488,9 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ Ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
- FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
- Handle<TypeFeedbackVector> feedback_vector =
- masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
- int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
- __ Mov(slot, Smi::FromInt(index));
- __ Mov(vector, feedback_vector);
+ int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
+ __ Mov(slot, Operand(Smi::FromInt(slot_index)));
+ __ Ldr(vector, MemOperand(fp, vectorOffset));
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1457,14 +1525,24 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
const int kReceiverOffset = kArgumentsOffset + kPointerSize;
const int kFunctionOffset = kReceiverOffset + kPointerSize;
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
Register args = x12;
Register receiver = x14;
Register function = x15;
+ Register apply_function = x1;
+
+ // Push the vector.
+ __ Ldr(
+ apply_function,
+ FieldMemOperand(apply_function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(apply_function,
+ FieldMemOperand(apply_function,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(apply_function);
// Get the length of the arguments via a builtin call.
__ Ldr(function, MemOperand(fp, kFunctionOffset));
@@ -1518,8 +1596,9 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
// Call a builtin to convert the receiver to a regular object.
__ Bind(&convert_receiver_to_object);
- __ Push(receiver);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Mov(x0, receiver);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Mov(receiver, x0);
__ B(&push_receiver);
@@ -1532,8 +1611,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ Push(receiver);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// At the end of the loop, the number of arguments is stored in 'current',
// represented as a smi.
@@ -1576,16 +1655,25 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
const int kFunctionOffset = kArgumentsOffset + kPointerSize;
-
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
// Is x11 safe to use?
Register newTarget = x11;
Register args = x12;
Register function = x15;
+ Register construct_function = x1;
+
+ // Push the vector.
+ __ Ldr(construct_function,
+ FieldMemOperand(construct_function,
+ JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(construct_function,
+ FieldMemOperand(construct_function,
+ SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(construct_function);
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
@@ -1606,24 +1694,24 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
Generate_CheckStackOverflow(masm, kFunctionOffset, argc, kArgcIsSmiTagged);
- // Push current limit and index, constructor & newTarget
+ // Push current limit and index & constructor function as callee.
__ Mov(x1, 0); // Initial index.
- __ Ldr(newTarget, MemOperand(fp, kNewTargetOffset));
- __ Push(argc, x1, newTarget, function);
+ __ Push(argc, x1, function);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
- __ Ldr(x1, MemOperand(fp, kFunctionOffset));
// Use undefined feedback vector
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ __ Ldr(x1, MemOperand(fp, kFunctionOffset));
+ __ Ldr(x4, MemOperand(fp, kNewTargetOffset));
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
+ // Leave internal frame.
}
__ Drop(kStackSize);
__ Ret();
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index e67b4fd2be..716910ea91 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
+#include "src/arm64/frames-arm64.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
@@ -13,8 +12,8 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -36,7 +35,7 @@ static void InitializeArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -70,7 +69,7 @@ static void InitializeInternalArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -227,6 +226,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ Cmp(right_type, SIMD128_VALUE_TYPE);
+ __ B(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
@@ -246,6 +248,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ Cmp(right_type, SIMD128_VALUE_TYPE);
+ __ B(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics,
// since we need to throw a TypeError. Smis and heap numbers have
@@ -645,26 +650,30 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- if (cond == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cond == eq && strict()) {
+ __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result
- if ((cond == lt) || (cond == le)) {
- ncr = GREATER;
+ Builtins::JavaScript native;
+ if (cond == eq) {
+ native = Builtins::EQUALS;
} else {
- DCHECK((cond == gt) || (cond == ge)); // remaining cases
- ncr = LESS;
+ native =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if ((cond == lt) || (cond == le)) {
+ ncr = GREATER;
+ } else {
+ DCHECK((cond == gt) || (cond == ge)); // remaining cases
+ ncr = LESS;
+ }
+ __ Mov(x10, Smi::FromInt(ncr));
+ __ Push(x10);
}
- __ Mov(x10, Smi::FromInt(ncr));
- __ Push(x10);
- }
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+ }
__ Bind(&miss);
GenerateMiss(masm);
@@ -1731,7 +1740,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// the runtime system.
__ Bind(&slow);
__ Push(key);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments, 1, 1);
}
@@ -2050,10 +2059,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Everything is fine, call runtime.
__ Push(receiver, key);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ Bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -2451,8 +2457,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Find the code object based on the assumptions above.
// kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
// of kPointerSize to reach the latter.
- DCHECK_EQ(JSRegExp::kDataOneByteCodeOffset + kPointerSize,
- JSRegExp::kDataUC16CodeOffset);
+ STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
+ JSRegExp::kDataUC16CodeOffset);
__ Mov(x10, kPointerSize);
// We will need the encoding later: Latin1 = 0x04
// UC16 = 0x00
@@ -2742,18 +2748,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
Register argc, Register function,
- Register feedback_vector,
- Register index) {
+ Register feedback_vector, Register index,
+ Register orig_construct, bool is_super) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(argc);
- __ Push(argc, function, feedback_vector, index);
+ if (is_super) {
+ __ Push(argc, function, feedback_vector, index, orig_construct);
+ } else {
+ __ Push(argc, function, feedback_vector, index);
+ }
DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(stub);
- __ Pop(index, feedback_vector, function, argc);
+ if (is_super) {
+ __ Pop(orig_construct, index, feedback_vector, function, argc);
+ } else {
+ __ Pop(index, feedback_vector, function, argc);
+ }
__ SmiUntag(argc);
}
@@ -2761,17 +2775,19 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
Register function,
Register feedback_vector, Register index,
- Register scratch1, Register scratch2,
- Register scratch3) {
+ Register orig_construct, Register scratch1,
+ Register scratch2, Register scratch3,
+ bool is_super) {
ASM_LOCATION("GenerateRecordCallTarget");
DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
- feedback_vector, index));
+ feedback_vector, index, orig_construct));
// Cache the called function in a feedback vector slot. Cache states are
// uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
// argc : number of arguments to the construct function
// function : the function to call
// feedback_vector : the feedback vector
// index : slot in feedback vector (smi)
+ // orig_construct : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2850,7 +2866,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
- feedback_vector, index);
+ feedback_vector, index, orig_construct,
+ is_super);
__ B(&done);
__ Bind(&not_array_function);
@@ -2858,7 +2875,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
CreateWeakCellStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
- feedback_vector, index);
+ feedback_vector, index, orig_construct, is_super);
__ Bind(&done);
}
@@ -2907,8 +2924,10 @@ static void EmitSlowCase(MacroAssembler* masm,
static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{ FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(x1, x3);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Push(x1);
+ __ Mov(x0, x3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(x1);
}
__ Poke(x0, argc * kPointerSize);
@@ -2985,7 +3004,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
- // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
+ // x3 : slot in feedback vector (Smi, for RecordCallTarget)
+ // x4 : original constructor (for IsSuperConstructorCall)
Register function = x1;
Label slow, non_function_call;
@@ -2997,7 +3017,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
&slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11);
+ GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12,
+ IsSuperConstructorCall());
__ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
if (FLAG_pretenuring_call_new) {
@@ -3020,9 +3041,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
if (IsSuperConstructorCall()) {
- __ Mov(x4, Operand(1 * kPointerSize));
- __ Add(x4, x4, Operand(x0, LSL, kPointerSizeLog2));
- __ Peek(x3, x4);
+ __ Mov(x3, x4);
} else {
__ Mov(x3, function);
}
@@ -3299,11 +3318,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(x1, x2, x3);
// Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
-
- ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
- __ CallExternalReference(miss, 3);
+ Runtime::FunctionId id = GetICState() == DEFAULT
+ ? Runtime::kCallIC_Miss
+ : Runtime::kCallIC_Customization_Miss;
+ __ CallRuntime(id, 3);
// Move result to edi and exit the internal frame.
__ Mov(x1, x0);
@@ -3672,7 +3690,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ Bind(&miss);
@@ -3744,9 +3762,6 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
Register stub_entry = x11;
{
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
-
FrameScope scope(masm, StackFrame::INTERNAL);
Register op = x10;
Register left = x1;
@@ -3758,7 +3773,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(left, right, op);
// Call the miss handler. This also pops the arguments.
- __ CallExternalReference(miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss, 3);
// Compute the entry point of the rewritten stub.
__ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
@@ -4004,7 +4019,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&runtime);
- __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
__ bind(&single_char);
// x1: result_length
@@ -4212,7 +4227,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime.
// Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -4655,7 +4670,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- false, receiver, name, feedback,
+ receiver, name, feedback,
receiver_map, scratch1, x7);
__ Bind(&miss);
@@ -4930,7 +4945,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
__ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the element size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
@@ -4999,7 +5014,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
Register entity_name = scratch0;
@@ -5090,7 +5105,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ And(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
__ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
@@ -5484,6 +5499,156 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context = cp;
+ Register result = x0;
+ Register slot = x2;
+ Label slow_case;
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ Ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ context = result;
+ }
+
+ // Load the PropertyCell value at the specified slot.
+ __ Add(result, context, Operand(slot, LSL, kPointerSizeLog2));
+ __ Ldr(result, ContextMemOperand(result));
+ __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
+
+ // If the result is not the_hole, return. Otherwise, handle in the runtime.
+ __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &slow_case);
+ __ Ret();
+
+ // Fallback to runtime.
+ __ Bind(&slow_case);
+ __ SmiTag(slot);
+ __ Push(slot);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+}
+
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context = cp;
+ Register value = x0;
+ Register slot = x2;
+ Register context_temp = x10;
+ Register cell = x10;
+ Register cell_details = x11;
+ Register cell_value = x12;
+ Register cell_value_map = x13;
+ Register value_map = x14;
+ Label fast_heapobject_case, fast_smi_case, slow_case;
+
+ if (FLAG_debug_code) {
+ __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
+ __ Check(ne, kUnexpectedValue);
+ }
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); i++) {
+ __ Ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ context = context_temp;
+ }
+
+ // Load the PropertyCell at the specified slot.
+ __ Add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
+ __ Ldr(cell, ContextMemOperand(cell));
+
+ // Load PropertyDetails for the cell (actually only the cell_type and kind).
+ __ Ldr(cell_details,
+ UntagSmiFieldMemOperand(cell, PropertyCell::kDetailsOffset));
+ __ And(cell_details, cell_details,
+ PropertyDetails::PropertyCellTypeField::kMask |
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask);
+
+ // Check if PropertyCell holds mutable data.
+ Label not_mutable_data;
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kMutable) |
+ PropertyDetails::KindField::encode(kData));
+ __ B(ne, &not_mutable_data);
+ __ JumpIfSmi(value, &fast_smi_case);
+ __ Bind(&fast_heapobject_case);
+ __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ // RecordWriteField clobbers the value register, so we copy it before the
+ // call.
+ __ Mov(x11, value);
+ __ RecordWriteField(cell, PropertyCell::kValueOffset, x11, x12,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ Bind(&not_mutable_data);
+ // Check if PropertyCell value matches the new value (relevant for Constant,
+ // ConstantType and Undefined cells).
+ Label not_same_value;
+ __ Ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ __ Cmp(cell_value, value);
+ __ B(ne, &not_same_value);
+
+ // Make sure the PropertyCell is not marked READ_ONLY.
+ __ Tst(cell_details, PropertyDetails::kAttributesReadOnlyMask);
+ __ B(ne, &slow_case);
+
+ if (FLAG_debug_code) {
+ Label done;
+ // This can only be true for Constant, ConstantType and Undefined cells,
+ // because we never store the_hole via this stub.
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstant) |
+ PropertyDetails::KindField::encode(kData));
+ __ B(eq, &done);
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData));
+ __ B(eq, &done);
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kUndefined) |
+ PropertyDetails::KindField::encode(kData));
+ __ Check(eq, kUnexpectedValue);
+ __ Bind(&done);
+ }
+ __ Ret();
+ __ Bind(&not_same_value);
+
+ // Check if PropertyCell contains data with constant type (and is not
+ // READ_ONLY).
+ __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData));
+ __ B(ne, &slow_case);
+
+ // Now either both old and new values must be smis or both must be heap
+ // objects with same map.
+ Label value_is_heap_object;
+ __ JumpIfNotSmi(value, &value_is_heap_object);
+ __ JumpIfNotSmi(cell_value, &slow_case);
+ // Old and new values are smis, no need for a write barrier here.
+ __ Bind(&fast_smi_case);
+ __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ __ Ret();
+
+ __ Bind(&value_is_heap_object);
+ __ JumpIfSmi(cell_value, &slow_case);
+
+ __ Ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
+ __ Ldr(value_map, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ Cmp(cell_value_map, value_map);
+ __ B(eq, &fast_heapobject_case);
+
+ // Fall back to the runtime.
+ __ Bind(&slow_case);
+ __ SmiTag(slot);
+ __ Push(slot, value);
+ __ TailCallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2, 1);
+}
+
+
// The number of register that CallApiFunctionAndReturn will need to save on
// the stack. The space for these registers need to be allocated in the
// ExitFrame before calling CallApiFunctionAndReturn.
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index 2d1ef57f38..c381df713d 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/simulator-arm64.h"
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index fc7bef69e9..1529c647ff 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -5,6 +5,8 @@
#ifndef V8_ARM64_CONSTANTS_ARM64_H_
#define V8_ARM64_CONSTANTS_ARM64_H_
+#include "src/base/macros.h"
+#include "src/globals.h"
// Assert that this is an LP64 system.
STATIC_ASSERT(sizeof(int) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
@@ -762,20 +764,6 @@ enum LoadStorePairOffsetOp {
#undef LOAD_STORE_PAIR_OFFSET
};
-enum LoadStorePairNonTemporalOp {
- LoadStorePairNonTemporalFixed = 0x28000000,
- LoadStorePairNonTemporalFMask = 0x3B800000,
- LoadStorePairNonTemporalMask = 0xFFC00000,
- STNP_w = LoadStorePairNonTemporalFixed | STP_w,
- LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
- STNP_x = LoadStorePairNonTemporalFixed | STP_x,
- LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
- STNP_s = LoadStorePairNonTemporalFixed | STP_s,
- LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
- STNP_d = LoadStorePairNonTemporalFixed | STP_d,
- LDNP_d = LoadStorePairNonTemporalFixed | LDP_d
-};
-
// Load literal.
enum LoadLiteralOp {
LoadLiteralFixed = 0x18000000,
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index 8258fbfde3..bde3e4aeb9 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -4,12 +4,11 @@
// CPU specific code for arm independent of OS goes here.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/utils-arm64.h"
#include "src/assembler.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/debug-arm64.cc b/deps/v8/src/arm64/debug-arm64.cc
deleted file mode 100644
index 2eec4466e1..0000000000
--- a/deps/v8/src/arm64/debug-arm64.cc
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/codegen.h"
-#include "src/debug.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void BreakLocation::SetDebugBreakAtReturn() {
- // Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
- // the return from JS function sequence from
- // mov sp, fp
- // ldp fp, lr, [sp] #16
- // lrd ip0, [pc, #(3 * kInstructionSize)]
- // add sp, sp, ip0
- // ret
- // <number of paramters ...
- // ... plus one (64 bits)>
- // to a call to the debug break return code.
- // ldr ip0, [pc, #(3 * kInstructionSize)]
- // blr ip0
- // hlt kHltBadCode @ code should not return, catch if it does.
- // <debug break return code ...
- // ... entry point address (64 bits)>
-
- // The patching code must not overflow the space occupied by the return
- // sequence.
- STATIC_ASSERT(Assembler::kJSReturnSequenceInstructions >= 5);
- PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc()), 5);
- byte* entry =
- debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry();
-
- // The first instruction of a patched return sequence must be a load literal
- // loading the address of the debug break return code.
- patcher.ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
- // TODO(all): check the following is correct.
- // The debug break return code will push a frame and call statically compiled
- // code. By using blr, even though control will not return after the branch,
- // this call site will be registered in the frame (lr being saved as the pc
- // of the next instruction to execute for this frame). The debugger can now
- // iterate on the frames to find call to debug break return code.
- patcher.blr(ip0);
- patcher.hlt(kHltBadCode);
- patcher.dc64(reinterpret_cast<int64_t>(entry));
-}
-
-
-void BreakLocation::SetDebugBreakAtSlot() {
- // Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
- // break slot code from
- // mov x0, x0 @ nop DEBUG_BREAK_NOP
- // mov x0, x0 @ nop DEBUG_BREAK_NOP
- // mov x0, x0 @ nop DEBUG_BREAK_NOP
- // mov x0, x0 @ nop DEBUG_BREAK_NOP
- // to a call to the debug slot code.
- // ldr ip0, [pc, #(2 * kInstructionSize)]
- // blr ip0
- // <debug break slot code ...
- // ... entry point address (64 bits)>
-
- // TODO(all): consider adding a hlt instruction after the blr as we don't
- // expect control to return here. This implies increasing
- // kDebugBreakSlotInstructions to 5 instructions.
-
- // The patching code must not overflow the space occupied by the return
- // sequence.
- STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
- PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc()), 4);
- byte* entry =
- debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry();
-
- // The first instruction of a patched debug break slot must be a load literal
- // loading the address of the debug break slot code.
- patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
- // TODO(all): check the following is correct.
- // The debug break slot code will push a frame and call statically compiled
- // code. By using blr, event hough control will not return after the branch,
- // this call site will be registered in the frame (lr being saved as the pc
- // of the next instruction to execute for this frame). The debugger can now
- // iterate on the frames to find call to debug break slot code.
- patcher.blr(ip0);
- patcher.dc64(reinterpret_cast<int64_t>(entry));
-}
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs,
- Register scratch) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingValue));
- __ PushMultipleTimes(scratch, LiveEdit::kFramePaddingInitialSize);
- __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
- __ Push(scratch);
-
- // Any live values (object_regs and non_object_regs) in caller-saved
- // registers (or lr) need to be stored on the stack so that their values are
- // safely preserved for a call into C code.
- //
- // Also:
- // * object_regs may be modified during the C code by the garbage
- // collector. Every object register must be a valid tagged pointer or
- // SMI.
- //
- // * non_object_regs will be converted to SMIs so that the garbage
- // collector doesn't try to interpret them as pointers.
- //
- // TODO(jbramley): Why can't this handle callee-saved registers?
- DCHECK((~kCallerSaved.list() & object_regs) == 0);
- DCHECK((~kCallerSaved.list() & non_object_regs) == 0);
- DCHECK((object_regs & non_object_regs) == 0);
- DCHECK((scratch.Bit() & object_regs) == 0);
- DCHECK((scratch.Bit() & non_object_regs) == 0);
- DCHECK((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
- STATIC_ASSERT(kSmiValueSize == 32);
-
- CPURegList non_object_list =
- CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
- while (!non_object_list.IsEmpty()) {
- // Store each non-object register as two SMIs.
- Register reg = Register(non_object_list.PopLowestIndex());
- __ Lsr(scratch, reg, 32);
- __ SmiTagAndPush(scratch, reg);
-
- // Stack:
- // jssp[12]: reg[63:32]
- // jssp[8]: 0x00000000 (SMI tag & padding)
- // jssp[4]: reg[31:0]
- // jssp[0]: 0x00000000 (SMI tag & padding)
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == kWRegSizeInBits);
- }
-
- if (object_regs != 0) {
- __ PushXRegList(object_regs);
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ Mov(x0, 0); // No arguments.
- __ Mov(x1, ExternalReference::debug_break(masm->isolate()));
-
- CEntryStub stub(masm->isolate(), 1);
- __ CallStub(&stub);
-
- // Restore the register values from the expression stack.
- if (object_regs != 0) {
- __ PopXRegList(object_regs);
- }
-
- non_object_list =
- CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
- while (!non_object_list.IsEmpty()) {
- // Load each non-object register from two SMIs.
- // Stack:
- // jssp[12]: reg[63:32]
- // jssp[8]: 0x00000000 (SMI tag & padding)
- // jssp[4]: reg[31:0]
- // jssp[0]: 0x00000000 (SMI tag & padding)
- Register reg = Register(non_object_list.PopHighestIndex());
- __ Pop(scratch, reg);
- __ Bfxil(reg, scratch, 32, 32);
- }
-
- // Don't bother removing padding bytes pushed on the stack
- // as the frame is going to be restored right away.
-
- // Leave the internal frame.
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ Mov(scratch, after_break_target);
- __ Ldr(scratch, MemOperand(scratch));
- __ Br(scratch);
-}
-
-
-void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallICStub
- // ----------- S t a t e -------------
- // -- x1 : function
- // -- x3 : slot in feedback array
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, x1.Bit() | x3.Bit(), 0, x10);
-}
-
-
-void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // In places other than IC call sites it is expected that r0 is TOS which
- // is an object - this is not generally the case so this should be used with
- // care.
- Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
-}
-
-
-void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-arm64.cc).
- // ----------- S t a t e -------------
- // -- x1 : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
- // ----------- S t a t e -------------
- // -- x0 : number of arguments (not smi)
- // -- x1 : constructor function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
- MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
- // ----------- S t a t e -------------
- // -- x0 : number of arguments (not smi)
- // -- x1 : constructor function
- // -- x2 : feedback array
- // -- x3 : feedback slot (smi)
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
-}
-
-
-void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction. Avoid emitting
- // the constant pool in the debug break slot code.
- InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
-
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
- __ nop(Assembler::DEBUG_BREAK_NOP);
- }
-}
-
-
-void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0, x10);
-}
-
-
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.AcquireX();
-
- __ Mov(scratch, restarter_frame_function_slot);
- __ Str(xzr, MemOperand(scratch));
-
- // We do not know our frame height, but set sp based on fp.
- __ Sub(masm->StackPointer(), fp, kPointerSize);
- __ AssertStackConsistency();
-
- __ Pop(x1, fp, lr); // Function, Frame, Return address.
-
- // Load context from the function.
- __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
-
- // Get function code.
- __ Ldr(scratch, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(scratch, FieldMemOperand(scratch, SharedFunctionInfo::kCodeOffset));
- __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
-
- // Re-run JSFunction, x1 is function, cp is context.
- __ Br(scratch);
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
index 5dd2fd9cc0..c29f2d3c5e 100644
--- a/deps/v8/src/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -231,7 +231,8 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
if (instr->Mask(0xC4400000) == 0xC0400000) {
V::VisitUnallocated(instr);
} else {
- V::VisitLoadStorePairNonTemporal(instr);
+ // Nontemporals are unimplemented.
+ V::VisitUnimplemented(instr);
}
} else {
V::VisitLoadStorePairPostIndex(instr);
diff --git a/deps/v8/src/arm64/decoder-arm64.cc b/deps/v8/src/arm64/decoder-arm64.cc
index 08aab4286e..56b3e0255e 100644
--- a/deps/v8/src/arm64/decoder-arm64.cc
+++ b/deps/v8/src/arm64/decoder-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/decoder-arm64.h"
diff --git a/deps/v8/src/arm64/decoder-arm64.h b/deps/v8/src/arm64/decoder-arm64.h
index af6bcc6f4f..6140bc2818 100644
--- a/deps/v8/src/arm64/decoder-arm64.h
+++ b/deps/v8/src/arm64/decoder-arm64.h
@@ -33,7 +33,6 @@ namespace internal {
V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \
- V(LoadStorePairNonTemporal) \
V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \
diff --git a/deps/v8/src/arm64/delayed-masm-arm64.cc b/deps/v8/src/arm64/delayed-masm-arm64.cc
index 77ad79199e..e86f10262f 100644
--- a/deps/v8/src/arm64/delayed-masm-arm64.cc
+++ b/deps/v8/src/arm64/delayed-masm-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/delayed-masm-arm64.h"
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 41a87643f2..65fb93e53c 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
+#include "src/arm64/frames-arm64.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/safepoint-table.h"
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index 232dfce5f0..fb3b692d08 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -7,8 +7,6 @@
#include <stdio.h>
#include <string.h>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/decoder-arm64-inl.h"
@@ -917,25 +915,6 @@ void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
}
-void Disassembler::VisitLoadStorePairNonTemporal(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form;
-
- switch (instr->Mask(LoadStorePairNonTemporalMask)) {
- case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
- case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
- case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
- case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
- case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
- case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
- case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
- case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
- default: form = "(LoadStorePairNonTemporal)";
- }
- Format(instr, mnemonic, form);
-}
-
-
void Disassembler::VisitFPCompare(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Fn, 'Fm";
diff --git a/deps/v8/src/arm64/disasm-arm64.h b/deps/v8/src/arm64/disasm-arm64.h
index 8cd3b80dbe..c6b189bf97 100644
--- a/deps/v8/src/arm64/disasm-arm64.h
+++ b/deps/v8/src/arm64/disasm-arm64.h
@@ -5,8 +5,6 @@
#ifndef V8_ARM64_DISASM_ARM64_H
#define V8_ARM64_DISASM_ARM64_H
-#include "src/v8.h"
-
#include "src/arm64/decoder-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/globals.h"
diff --git a/deps/v8/src/arm64/frames-arm64.cc b/deps/v8/src/arm64/frames-arm64.cc
index 73c678aaa6..d3dea408bd 100644
--- a/deps/v8/src/arm64/frames-arm64.cc
+++ b/deps/v8/src/arm64/frames-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64-inl.h"
diff --git a/deps/v8/src/arm64/frames-arm64.h b/deps/v8/src/arm64/frames-arm64.h
index 963dc3e025..9e6551783d 100644
--- a/deps/v8/src/arm64/frames-arm64.h
+++ b/deps/v8/src/arm64/frames-arm64.h
@@ -63,12 +63,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
} } // namespace v8::internal
#endif // V8_ARM64_FRAMES_ARM64_H_
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index 789268430d..60243d8306 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#define ARM64_DEFINE_FP_STATICS
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index 9bd02f45ab..7a8e2f4ee1 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -364,12 +364,6 @@ void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
}
-void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
- Update();
- InstrumentLoadStorePair(instr);
-}
-
-
void Instrument::VisitLoadLiteral(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Load Literal");
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 62e6f2a79e..b49b457124 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/interface-descriptors.h"
@@ -36,7 +34,11 @@ const Register VectorStoreICDescriptor::VectorRegister() { return x3; }
const Register StoreTransitionDescriptor::MapRegister() { return x3; }
-const Register ElementTransitionAndStoreDescriptor::MapRegister() { return x3; }
+const Register LoadGlobalViaContextDescriptor::SlotRegister() { return x2; }
+
+
+const Register StoreGlobalViaContextDescriptor::SlotRegister() { return x2; }
+const Register StoreGlobalViaContextDescriptor::ValueRegister() { return x0; }
const Register InstanceofDescriptor::left() {
@@ -68,6 +70,14 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
+void StoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: function info
@@ -92,6 +102,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
}
+// static
+const Register ToObjectDescriptor::ReceiverRegister() { return x0; }
+
+
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value
@@ -181,10 +195,11 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
- // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
+ // x3 : slot in feedback vector (Smi, for RecordCallTarget)
+ // x4 : original constructor (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {x0, x1, x2};
+ Register registers[] = {x0, x1, x4, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -389,11 +404,22 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
+void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ x1, // math rounding function
+ x3, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void MathRoundVariantCallFromOptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
x1, // math rounding function
x3, // vector slot id
+ x4, // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm64/lithium-arm64.cc b/deps/v8/src/arm64/lithium-arm64.cc
index fef51c669b..4b8208180e 100644
--- a/deps/v8/src/arm64/lithium-arm64.cc
+++ b/deps/v8/src/arm64/lithium-arm64.cc
@@ -4,8 +4,6 @@
#include <sstream>
-#include "src/v8.h"
-
#include "src/arm64/lithium-codegen-arm64.h"
#include "src/hydrogen-osr.h"
#include "src/lithium-inl.h"
@@ -296,6 +294,11 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
+void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d", depth(), slot_index());
+}
+
+
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -315,6 +318,12 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
+ value()->PrintTo(stream);
+}
+
+
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if string_compare(");
left()->PrintTo(stream);
@@ -887,8 +896,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsExternal()) {
- DCHECK(instr->left()->representation().IsExternal());
- DCHECK(instr->right()->representation().IsInteger32());
+ DCHECK(instr->IsConsistentExternalRepresentation());
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
@@ -1203,7 +1211,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (to.IsSmi()) {
LOperand* value = UseRegisterAtStart(val);
LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
- if (val->CheckFlag(HInstruction::kUint32)) {
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
return result;
@@ -1703,13 +1711,22 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
+ HLoadGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ DCHECK(instr->slot_index() > 0);
+ LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
+ return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
DCHECK(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* elements = UseRegister(instr->elements());
LOperand* key = UseRegisterOrConstant(instr->key());
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
if (instr->representation().IsDouble()) {
LOperand* temp = (!instr->key()->IsConstant() ||
instr->RequiresHoleCheck())
@@ -1743,8 +1760,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
LInstruction* result = DefineAsRegister(
new(zone()) LLoadKeyedExternal(elements, key, temp));
- if ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
- elements_kind == UINT32_ELEMENTS) &&
+ if (elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32)) {
result = AssignEnvironment(result);
}
@@ -2348,7 +2364,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* elements = NULL;
LOperand* val = NULL;
- if (!instr->is_typed_elements() &&
+ if (!instr->is_fixed_typed_array() &&
instr->value()->representation().IsTagged() &&
instr->NeedsWriteBarrier()) {
// RecordWrite() will clobber all registers.
@@ -2361,15 +2377,12 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
temp = instr->key()->IsConstant() ? NULL : TempRegister();
}
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DCHECK((instr->value()->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
- DCHECK((instr->is_fixed_typed_array() &&
- instr->elements()->representation().IsTagged()) ||
- (instr->is_external() &&
- instr->elements()->representation().IsExternal()));
+ DCHECK(instr->elements()->representation().IsExternal());
return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
} else if (instr->value()->representation().IsDouble()) {
@@ -2457,6 +2470,19 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
+ HStoreGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* value = UseFixed(instr->value(),
+ StoreGlobalViaContextDescriptor::ValueRegister());
+ DCHECK(instr->slot_index() > 0);
+
+ LStoreGlobalViaContext* result =
+ new (zone()) LStoreGlobalViaContext(context, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), x1);
diff --git a/deps/v8/src/arm64/lithium-arm64.h b/deps/v8/src/arm64/lithium-arm64.h
index 4507c07591..70337778f4 100644
--- a/deps/v8/src/arm64/lithium-arm64.h
+++ b/deps/v8/src/arm64/lithium-arm64.h
@@ -104,6 +104,7 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
+ V(LoadGlobalViaContext) \
V(LoadKeyedExternal) \
V(LoadKeyedFixed) \
V(LoadKeyedFixedDouble) \
@@ -152,6 +153,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
+ V(StoreGlobalViaContext) \
V(StoreKeyedExternal) \
V(StoreKeyedFixed) \
V(StoreKeyedFixedDouble) \
@@ -1673,6 +1675,22 @@ class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
};
+class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ LOperand* context() { return inputs_[0]; }
+
+ int depth() const { return hydrogen()->depth(); }
+ int slot_index() const { return hydrogen()->slot_index(); }
+};
+
+
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1748,7 +1766,7 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
+ TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
};
@@ -2455,6 +2473,28 @@ class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
};
+class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreGlobalViaContext(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
+ "store-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int depth() { return hydrogen()->depth(); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
template<int T>
class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
public:
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/arm64/lithium-codegen-arm64.cc
index 074926b83b..3dff64cbe8 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.cc
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
+#include "src/arm64/frames-arm64.h"
#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/base/bits.h"
@@ -276,15 +275,23 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
if (op->IsStackSlot()) {
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
if (is_tagged) {
- translation->StoreStackSlot(op->index());
+ translation->StoreStackSlot(index);
} else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
+ translation->StoreUint32StackSlot(index);
} else {
- translation->StoreInt32StackSlot(op->index());
+ translation->StoreInt32StackSlot(index);
}
} else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
+ translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -1476,9 +1483,14 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoAddE(LAddE* instr) {
Register result = ToRegister(instr->result());
Register left = ToRegister(instr->left());
- Operand right = (instr->right()->IsConstantOperand())
- ? ToInteger32(LConstantOperand::cast(instr->right()))
- : Operand(ToRegister32(instr->right()), SXTW);
+ Operand right = Operand(x0); // Dummy initialization.
+ if (instr->hydrogen()->external_add_type() == AddOfExternalAndTagged) {
+ right = Operand(ToRegister(instr->right()));
+ } else if (instr->right()->IsConstantOperand()) {
+ right = ToInteger32(LConstantOperand::cast(instr->right()));
+ } else {
+ right = Operand(ToRegister32(instr->right()), SXTW);
+ }
DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
__ Add(result, left, right);
@@ -1926,6 +1938,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ B(eq, true_label);
}
+ if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ // SIMD value -> true.
+ __ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE);
+ __ B(eq, true_label);
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
Label not_heap_number;
__ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
@@ -3362,13 +3380,31 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->result()).Is(x0));
__ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
- PREMONOMORPHIC).code();
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+ SLOPPY, PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->result()).is(x0));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ Mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub =
+ CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+}
+
+
MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
Register key,
Register base,
@@ -3426,42 +3462,33 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
elements_kind,
instr->base_offset());
- if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
- (elements_kind == FLOAT32_ELEMENTS)) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
DoubleRegister result = ToDoubleRegister(instr->result());
__ Ldr(result.S(), mem_op);
__ Fcvt(result, result.S());
- } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
- (elements_kind == FLOAT64_ELEMENTS)) {
+ } else if (elements_kind == FLOAT64_ELEMENTS) {
DoubleRegister result = ToDoubleRegister(instr->result());
__ Ldr(result, mem_op);
} else {
Register result = ToRegister(instr->result());
switch (elements_kind) {
- case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
__ Ldrsb(result, mem_op);
break;
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ Ldrb(result, mem_op);
break;
- case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
__ Ldrsh(result, mem_op);
break;
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
__ Ldrh(result, mem_op);
break;
- case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
__ Ldrsw(result, mem_op);
break;
- case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
__ Ldr(result.W(), mem_op);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
@@ -3472,8 +3499,6 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
@@ -3692,7 +3717,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -5017,8 +5042,8 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
// here.
__ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
__ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
- __ Push(cp, scratch1, scratch2); // The context is the first argument.
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ __ Push(scratch1, scratch2);
+ CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}
@@ -5148,44 +5173,33 @@ void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
elements_kind,
instr->base_offset());
- if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
- (elements_kind == FLOAT32_ELEMENTS)) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
DoubleRegister value = ToDoubleRegister(instr->value());
DoubleRegister dbl_scratch = double_scratch();
__ Fcvt(dbl_scratch.S(), value);
__ Str(dbl_scratch.S(), dst);
- } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
- (elements_kind == FLOAT64_ELEMENTS)) {
+ } else if (elements_kind == FLOAT64_ELEMENTS) {
DoubleRegister value = ToDoubleRegister(instr->value());
__ Str(value, dst);
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_INT8_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case INT8_ELEMENTS:
__ Strb(value, dst);
break;
- case EXTERNAL_INT16_ELEMENTS:
- case EXTERNAL_UINT16_ELEMENTS:
case INT16_ELEMENTS:
case UINT16_ELEMENTS:
__ Strh(value, dst);
break;
- case EXTERNAL_INT32_ELEMENTS:
- case EXTERNAL_UINT32_ELEMENTS:
case INT32_ELEMENTS:
case UINT32_ELEMENTS:
__ Str(value.W(), dst);
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -5507,6 +5521,30 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->value())
+ .is(StoreGlobalViaContextDescriptor::ValueRegister()));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ Mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
+ isolate(), depth, instr->language_mode())
+ .code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
+ __ CallRuntime(is_strict(instr->language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->left()).Is(x1));
@@ -5907,10 +5945,8 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register scratch = ToRegister(instr->temp2());
__ JumpIfSmi(value, false_label);
- __ JumpIfObjectType(
- value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
- __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+ __ CompareObjectType(value, map, scratch, FIRST_NONSTRING_TYPE);
+ EmitBranch(instr, lt);
} else if (String::Equals(type_name, factory->symbol_string())) {
DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
@@ -5962,6 +5998,20 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
__ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(type_name, factory->type##_string())) { \
+ DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); \
+ Register map = ToRegister(instr->temp1()); \
+ \
+ __ JumpIfSmi(value, false_label); \
+ __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); \
+ __ CompareRoot(map, Heap::k##Type##MapRootIndex); \
+ EmitBranch(instr, eq);
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+
} else {
__ B(false_label);
}
diff --git a/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc b/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
index 7d01f792bc..1520fa1888 100644
--- a/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
+++ b/deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/arm64/delayed-masm-arm64-inl.h"
#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
diff --git a/deps/v8/src/arm64/lithium-gap-resolver-arm64.h b/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
index 2eb651b924..8866db4c94 100644
--- a/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
+++ b/deps/v8/src/arm64/lithium-gap-resolver-arm64.h
@@ -5,8 +5,6 @@
#ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
#define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
-#include "src/v8.h"
-
#include "src/arm64/delayed-masm-arm64.h"
#include "src/lithium.h"
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index b691e21813..445513bf5a 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -869,15 +869,6 @@ void MacroAssembler::Isb() {
}
-void MacroAssembler::Ldnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src) {
- DCHECK(allow_macro_instructions_);
- DCHECK(!AreAliased(rt, rt2));
- ldnp(rt, rt2, src);
-}
-
-
void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) {
DCHECK(allow_macro_instructions_);
ldr(rt, imm);
@@ -1134,14 +1125,6 @@ void MacroAssembler::Umull(const Register& rd, const Register& rn,
}
-void MacroAssembler::Stnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst) {
- DCHECK(allow_macro_instructions_);
- stnp(rt, rt2, dst);
-}
-
-
void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index c7d6797416..586df33c4d 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
+#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -907,6 +906,25 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
}
+void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3,
+ const CPURegister& dst4, const CPURegister& dst5,
+ const CPURegister& dst6, const CPURegister& dst7) {
+ // It is not valid to pop into the same register more than once in one
+ // instruction, not even into the zero register.
+ DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
+ DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
+ DCHECK(dst0.IsValid());
+
+ int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
+ int size = dst0.SizeInBytes();
+
+ PopHelper(4, size, dst0, dst1, dst2, dst3);
+ PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
+ PopPostamble(count, size);
+}
+
+
void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
int size = src0.SizeInBytes() + src1.SizeInBytes();
@@ -3030,10 +3048,10 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::DebugBreak() {
Mov(x0, 0);
- Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
+ Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
CEntryStub ces(isolate(), 1);
DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
@@ -3223,26 +3241,6 @@ void MacroAssembler::Allocate(Register object_size,
}
-void MacroAssembler::UndoAllocationInNewSpace(Register object,
- Register scratch) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- Bic(object, object, kHeapObjectTagMask);
-#ifdef DEBUG
- // Check that the object un-allocated is below the current top.
- Mov(scratch, new_space_allocation_top);
- Ldr(scratch, MemOperand(scratch));
- Cmp(object, scratch);
- Check(lt, kUndoAllocationOfNonAllocatedMemory);
-#endif
- // Write the address of the object to un-allocate as the current top.
- Mov(scratch, new_space_allocation_top);
- Str(object, MemOperand(scratch));
-}
-
-
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -4417,21 +4415,29 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register scratch1,
Label* found) {
DCHECK(!AreAliased(object, scratch0, scratch1));
- Factory* factory = isolate()->factory();
Register current = scratch0;
- Label loop_again;
+ Label loop_again, end;
// Scratch contains elements pointer.
Mov(current, object);
+ Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);
// Loop based on the map going up the prototype chain.
Bind(&loop_again);
Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
+ B(lo, found);
Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
- CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
+ CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);
+
+ Bind(&end);
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 7854ff0e52..76e2fdb3fb 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -7,11 +7,10 @@
#include <vector>
+#include "src/arm64/assembler-arm64.h"
#include "src/bailout-reason.h"
-#include "src/globals.h"
-
-#include "src/arm64/assembler-arm64-inl.h"
#include "src/base/bits.h"
+#include "src/globals.h"
// Simulator specific helpers.
#if USE_SIMULATOR
@@ -34,6 +33,20 @@
namespace v8 {
namespace internal {
+// Give alias names to registers for calling conventions.
+// TODO(titzer): arm64 is a pain for aliasing; get rid of these macros
+#define kReturnRegister0 x0
+#define kReturnRegister1 x1
+#define kJSFunctionRegister x1
+#define kContextRegister cp
+#define kInterpreterAccumulatorRegister x0
+#define kInterpreterRegisterFileRegister x18
+#define kInterpreterBytecodeOffsetRegister x19
+#define kInterpreterBytecodeArrayRegister x20
+#define kInterpreterDispatchTableRegister x21
+#define kRuntimeCallFunctionRegister x1
+#define kRuntimeCallArgCountRegister x0
+
#define LS_MACRO_LIST(V) \
V(Ldrb, Register&, rt, LDRB_w) \
V(Strb, Register&, rt, STRB_w) \
@@ -569,6 +582,10 @@ class MacroAssembler : public Assembler {
const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
+ void Pop(const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3,
+ const CPURegister& dst4, const CPURegister& dst5 = NoReg,
+ const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
void Push(const Register& src0, const FPRegister& src1);
// Alternative forms of Push and Pop, taking a RegList or CPURegList that
@@ -1305,12 +1322,6 @@ class MacroAssembler : public Assembler {
Label* gc_required,
AllocationFlags flags);
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. The caller must make sure that no pointers
- // are left to the object(s) no longer allocated as they would be invalid when
- // allocation is undone.
- void UndoAllocationInNewSpace(Register object, Register scratch);
-
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -1771,7 +1782,7 @@ class MacroAssembler : public Assembler {
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
Register object,
int offset,
@@ -2235,7 +2246,7 @@ class UseScratchRegisterScope {
};
-inline MemOperand ContextMemOperand(Register context, int index) {
+inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 29d3ea2419..83fd164bb6 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -5,7 +5,6 @@
#include <stdlib.h>
#include <cmath>
#include <cstdarg>
-#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
@@ -223,6 +222,9 @@ int64_t Simulator::CallRegExp(byte* entry,
void Simulator::CheckPCSComplianceAndRun() {
+ // Adjust JS-based stack limit to C-based stack limit.
+ isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
#ifdef DEBUG
CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
CHECK_EQ(kNumberOfCalleeSavedFPRegisters, kCalleeSavedFP.Count());
@@ -333,9 +335,15 @@ uintptr_t Simulator::PopAddress() {
// Returns the limit of the stack area to enable checking for stack overflows.
-uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
- // pushing values.
+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
+ // The simulator uses a separate JS stack. If we have exhausted the C stack,
+ // we also drop down the JS limit to reflect the exhaustion on the JS stack.
+ if (GetCurrentStackPosition() < c_limit) {
+ return reinterpret_cast<uintptr_t>(get_sp());
+ }
+
+ // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
+ // to prevent overrunning the stack when pushing values.
return stack_limit_ + 1024;
}
@@ -1676,11 +1684,6 @@ void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
}
-void Simulator::VisitLoadStorePairNonTemporal(Instruction* instr) {
- LoadStorePairHelper(instr, Offset);
-}
-
-
void Simulator::LoadStorePairHelper(Instruction* instr,
AddrMode addrmode) {
unsigned rt = instr->Rt();
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 64fceb3451..6ff0013ebd 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -8,8 +8,6 @@
#include <stdarg.h>
#include <vector>
-#include "src/v8.h"
-
#include "src/allocation.h"
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/decoder-arm64.h"
@@ -268,7 +266,7 @@ class Simulator : public DecoderVisitor {
uintptr_t PopAddress();
// Accessor to the internal simulator stack area.
- uintptr_t StackLimit() const;
+ uintptr_t StackLimit(uintptr_t c_limit) const;
void ResetState();
@@ -403,7 +401,7 @@ class Simulator : public DecoderVisitor {
}
Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
- Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); }
+ Address get_sp() const { return reg<Address>(31, Reg31IsStackPointer); }
template<typename T>
T fpreg(unsigned code) const {
@@ -884,13 +882,14 @@ class Simulator : public DecoderVisitor {
// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code.
-// See also 'class SimulatorStack' in arm/simulator-arm.h.
+// the C-based native code. The JS-based limit normally points near the end of
+// the simulator stack. When the C-based limit is exhausted we reflect that by
+// lowering the JS-based limit as well, to make stack checks trigger.
class SimulatorStack : public v8::internal::AllStatic {
public:
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit();
+ return Simulator::current(isolate)->StackLimit(c_limit);
}
static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
diff --git a/deps/v8/src/arm64/utils-arm64.h b/deps/v8/src/arm64/utils-arm64.h
index eee614d288..da91fd5d60 100644
--- a/deps/v8/src/arm64/utils-arm64.h
+++ b/deps/v8/src/arm64/utils-arm64.h
@@ -6,7 +6,6 @@
#define V8_ARM64_UTILS_ARM64_H_
#include <cmath>
-#include "src/v8.h"
#include "src/arm64/constants-arm64.h"
diff --git a/deps/v8/src/array-iterator.js b/deps/v8/src/array-iterator.js
index 8efabe50e3..9698a07b04 100644
--- a/deps/v8/src/array-iterator.js
+++ b/deps/v8/src/array-iterator.js
@@ -45,7 +45,7 @@ function ArrayIterator() {}
// 15.4.5.1 CreateArrayIterator Abstract Operation
function CreateArrayIterator(array, kind) {
- var object = $toObject(array);
+ var object = TO_OBJECT(array);
var iterator = new ArrayIterator;
SET_PRIVATE(iterator, arrayIteratorObjectSymbol, object);
SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, 0);
@@ -68,7 +68,7 @@ function ArrayIteratorIterator() {
// 15.4.5.2.2 ArrayIterator.prototype.next( )
function ArrayIteratorNext() {
- var iterator = $toObject(this);
+ var iterator = TO_OBJECT(this);
if (!HAS_DEFINED_PRIVATE(iterator, arrayIteratorNextIndexSymbol)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
@@ -138,6 +138,10 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
'keys', ArrayKeys
]);
+// TODO(adam): Remove this call once 'values' is in the above
+// InstallFunctions block, as it'll be redundant.
+utils.SetFunctionName(ArrayValues, 'values');
+
%AddNamedProperty(GlobalArray.prototype, symbolIterator, ArrayValues,
DONT_ENUM);
@@ -160,4 +164,8 @@ utils.Export(function(to) {
$arrayValues = ArrayValues;
+utils.ExportToRuntime(function(to) {
+ to.ArrayValues = ArrayValues;
+});
+
})
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 7baabf8361..4520a34e35 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -19,16 +19,17 @@ var $arrayUnshift;
// -------------------------------------------------------------------
// Imports
+var Delete;
var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
-
-var Delete;
var MathMin;
var ObjectHasOwnProperty;
var ObjectIsFrozen;
var ObjectIsSealed;
var ObjectToString;
+var ToNumber;
+var ToString;
utils.Import(function(from) {
Delete = from.Delete;
@@ -37,6 +38,8 @@ utils.Import(function(from) {
ObjectIsFrozen = from.ObjectIsFrozen;
ObjectIsSealed = from.ObjectIsSealed;
ObjectToString = from.ObjectToString;
+ ToNumber = from.ToNumber;
+ ToString = from.ToString;
});
// -------------------------------------------------------------------
@@ -216,7 +219,7 @@ function ConvertToString(x) {
// Assumes x is a non-string.
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
- return (IS_NULL_OR_UNDEFINED(x)) ? '' : $toString($defaultString(x));
+ return (IS_NULL_OR_UNDEFINED(x)) ? '' : ToString($defaultString(x));
}
@@ -227,8 +230,8 @@ function ConvertToLocaleString(e) {
// According to ES5, section 15.4.4.3, the toLocaleString conversion
// must throw a TypeError if ToObject(e).toLocaleString isn't
// callable.
- var e_obj = $toObject(e);
- return $toString(e_obj.toLocaleString());
+ var e_obj = TO_OBJECT(e);
+ return ToString(e_obj.toLocaleString());
}
}
@@ -388,7 +391,7 @@ function ArrayToString() {
}
array = this;
} else {
- array = $toObject(this);
+ array = TO_OBJECT(this);
func = array.join;
}
if (!IS_SPEC_FUNCTION(func)) {
@@ -406,7 +409,7 @@ function InnerArrayToLocaleString(array, length) {
function ArrayToLocaleString() {
- var array = $toObject(this);
+ var array = TO_OBJECT(this);
var arrayLen = array.length;
return InnerArrayToLocaleString(array, arrayLen);
}
@@ -437,7 +440,7 @@ function InnerArrayJoin(separator, array, length) {
function ArrayJoin(separator) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
- var array = TO_OBJECT_INLINE(this);
+ var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
return InnerArrayJoin(separator, array, length);
@@ -466,7 +469,7 @@ function ObservedArrayPop(n) {
function ArrayPop() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.pop");
- var array = TO_OBJECT_INLINE(this);
+ var array = TO_OBJECT(this);
var n = TO_UINT32(array.length);
if (n == 0) {
array.length = n;
@@ -512,7 +515,7 @@ function ArrayPush() {
if (%IsObserved(this))
return ObservedArrayPush.apply(this, arguments);
- var array = TO_OBJECT_INLINE(this);
+ var array = TO_OBJECT(this);
var n = TO_UINT32(array.length);
var m = %_ArgumentsLength();
@@ -532,7 +535,7 @@ function ArrayPush() {
function ArrayConcatJS(arg1) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.concat");
- var array = $toObject(this);
+ var array = TO_OBJECT(this);
var arg_count = %_ArgumentsLength();
var arrays = new InternalArray(1 + arg_count);
arrays[0] = array;
@@ -587,14 +590,25 @@ function SparseReverse(array, len) {
}
}
-
-function InnerArrayReverse(array, len) {
+function PackedArrayReverse(array, len) {
var j = len - 1;
for (var i = 0; i < j; i++, j--) {
var current_i = array[i];
- if (!IS_UNDEFINED(current_i) || i in array) {
- var current_j = array[j];
- if (!IS_UNDEFINED(current_j) || j in array) {
+ var current_j = array[j];
+ array[i] = current_j;
+ array[j] = current_i;
+ }
+ return array;
+}
+
+
+function GenericArrayReverse(array, len) {
+ var j = len - 1;
+ for (var i = 0; i < j; i++, j--) {
+ if (i in array) {
+ var current_i = array[i];
+ if (j in array) {
+ var current_j = array[j];
array[i] = current_j;
array[j] = current_i;
} else {
@@ -602,8 +616,8 @@ function InnerArrayReverse(array, len) {
delete array[i];
}
} else {
- var current_j = array[j];
- if (!IS_UNDEFINED(current_j) || j in array) {
+ if (j in array) {
+ var current_j = array[j];
array[i] = current_j;
delete array[j];
}
@@ -616,16 +630,19 @@ function InnerArrayReverse(array, len) {
function ArrayReverse() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
- var array = TO_OBJECT_INLINE(this);
+ var array = TO_OBJECT(this);
var len = TO_UINT32(array.length);
+ var isArray = IS_ARRAY(array);
- if (UseSparseVariant(array, len, IS_ARRAY(array), len)) {
+ if (UseSparseVariant(array, len, isArray, len)) {
%NormalizeElements(array);
SparseReverse(array, len);
return array;
+ } else if (isArray && %_HasFastPackedElements(array)) {
+ return PackedArrayReverse(array, len);
+ } else {
+ return GenericArrayReverse(array, len);
}
-
- return InnerArrayReverse(array, len);
}
@@ -648,7 +665,7 @@ function ObservedArrayShift(len) {
function ArrayShift() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.shift");
- var array = TO_OBJECT_INLINE(this);
+ var array = TO_OBJECT(this);
var len = TO_UINT32(array.length);
if (len === 0) {
@@ -702,7 +719,7 @@ function ArrayUnshift(arg1) { // length == 1
if (%IsObserved(this))
return ObservedArrayUnshift.apply(this, arguments);
- var array = TO_OBJECT_INLINE(this);
+ var array = TO_OBJECT(this);
var len = TO_UINT32(array.length);
var num_arguments = %_ArgumentsLength();
@@ -726,7 +743,7 @@ function ArrayUnshift(arg1) { // length == 1
function ArraySlice(start, end) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.slice");
- var array = TO_OBJECT_INLINE(this);
+ var array = TO_OBJECT(this);
var len = TO_UINT32(array.length);
var start_i = TO_INTEGER(start);
var end_i = len;
@@ -844,7 +861,7 @@ function ArraySplice(start, delete_count) {
return ObservedArraySplice.apply(this, arguments);
var num_arguments = %_ArgumentsLength();
- var array = TO_OBJECT_INLINE(this);
+ var array = TO_OBJECT(this);
var len = TO_UINT32(array.length);
var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
@@ -900,8 +917,8 @@ function InnerArraySort(length, comparefn) {
if (%_IsSmi(x) && %_IsSmi(y)) {
return %SmiLexicographicCompare(x, y);
}
- x = $toString(x);
- y = $toString(y);
+ x = ToString(x);
+ y = ToString(y);
if (x == y) return 0;
else return x < y ? -1 : 1;
};
@@ -1176,7 +1193,7 @@ function InnerArraySort(length, comparefn) {
function ArraySort(comparefn) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
- var array = $toObject(this);
+ var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
return %_CallFunction(array, length, comparefn, InnerArraySort);
}
@@ -1203,7 +1220,7 @@ function InnerArrayFilter(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
+ var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
if (%_CallFunction(new_receiver, element, i, array, f)) {
accumulator[accumulator_length++] = element;
}
@@ -1217,8 +1234,8 @@ function ArrayFilter(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
- var array = $toObject(this);
- var length = $toUint32(array.length);
+ var array = TO_OBJECT(this);
+ var length = TO_UINT32(array.length);
var accumulator = InnerArrayFilter(f, receiver, array, length);
var result = new GlobalArray();
%MoveArrayContents(accumulator, result);
@@ -1241,7 +1258,7 @@ function InnerArrayForEach(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
+ var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
%_CallFunction(new_receiver, element, i, array, f);
}
}
@@ -1252,7 +1269,7 @@ function ArrayForEach(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
- var array = $toObject(this);
+ var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
InnerArrayForEach(f, receiver, array, length);
}
@@ -1274,7 +1291,7 @@ function InnerArraySome(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
+ var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
if (%_CallFunction(new_receiver, element, i, array, f)) return true;
}
}
@@ -1289,7 +1306,7 @@ function ArraySome(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
- var array = $toObject(this);
+ var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
return InnerArraySome(f, receiver, array, length);
}
@@ -1311,7 +1328,7 @@ function InnerArrayEvery(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
+ var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
if (!%_CallFunction(new_receiver, element, i, array, f)) return false;
}
}
@@ -1323,7 +1340,7 @@ function ArrayEvery(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
- var array = $toObject(this);
+ var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
return InnerArrayEvery(f, receiver, array, length);
}
@@ -1346,7 +1363,7 @@ function InnerArrayMap(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
+ var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
accumulator[i] = %_CallFunction(new_receiver, element, i, array, f);
}
}
@@ -1359,7 +1376,7 @@ function ArrayMap(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
- var array = $toObject(this);
+ var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
var accumulator = InnerArrayMap(f, receiver, array, length);
var result = new GlobalArray();
@@ -1528,8 +1545,8 @@ function ArrayReduce(callback, current) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
- var array = $toObject(this);
- var length = $toUint32(array.length);
+ var array = TO_OBJECT(this);
+ var length = TO_UINT32(array.length);
return InnerArrayReduce(callback, current, array, length,
%_ArgumentsLength());
}
@@ -1571,8 +1588,8 @@ function ArrayReduceRight(callback, current) {
// Pull out the length so that side effects are visible before the
// callback function is checked.
- var array = $toObject(this);
- var length = $toUint32(array.length);
+ var array = TO_OBJECT(this);
+ var length = TO_UINT32(array.length);
return InnerArrayReduceRight(callback, current, array, length,
%_ArgumentsLength());
}
@@ -1688,10 +1705,10 @@ utils.Export(function(to) {
to.InnerArrayMap = InnerArrayMap;
to.InnerArrayReduce = InnerArrayReduce;
to.InnerArrayReduceRight = InnerArrayReduceRight;
- to.InnerArrayReverse = InnerArrayReverse;
to.InnerArraySome = InnerArraySome;
to.InnerArraySort = InnerArraySort;
to.InnerArrayToLocaleString = InnerArrayToLocaleString;
+ to.PackedArrayReverse = PackedArrayReverse;
});
$arrayConcat = ArrayConcatJS;
diff --git a/deps/v8/src/arraybuffer.js b/deps/v8/src/arraybuffer.js
index 9657b9e376..2edcd12cad 100644
--- a/deps/v8/src/arraybuffer.js
+++ b/deps/v8/src/arraybuffer.js
@@ -13,13 +13,14 @@
var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalObject = global.Object;
-
var MathMax;
var MathMin;
+var ToNumber;
utils.Import(function(from) {
MathMax = from.MathMax;
MathMin = from.MathMin;
+ ToNumber = from.ToNumber;
});
// -------------------------------------------------------------------
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index e6aaa914bf..b7550bb795 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -45,14 +45,14 @@
#include "src/codegen.h"
#include "src/counters.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/serialize.h"
#include "src/token.h"
@@ -80,21 +80,21 @@
// Include native regexp-macro-assembler.
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/regexp-macro-assembler-ia32.h" // NOLINT
+#include "src/regexp/ia32/regexp-macro-assembler-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/regexp-macro-assembler-x64.h" // NOLINT
+#include "src/regexp/x64/regexp-macro-assembler-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/regexp-macro-assembler-arm64.h" // NOLINT
+#include "src/regexp/arm64/regexp-macro-assembler-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
+#include "src/regexp/arm/regexp-macro-assembler-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT
+#include "src/regexp/ppc/regexp-macro-assembler-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
+#include "src/regexp/mips/regexp-macro-assembler-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/regexp-macro-assembler-mips64.h" // NOLINT
+#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
-#include "src/x87/regexp-macro-assembler-x87.h" // NOLINT
+#include "src/regexp/x87/regexp-macro-assembler-x87.h" // NOLINT
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
@@ -158,6 +158,10 @@ AssemblerBase::~AssemblerBase() {
// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope
+PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler)
+ : PredictableCodeSizeScope(assembler, -1) {}
+
+
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
int expected_size)
: assembler_(assembler),
@@ -248,44 +252,22 @@ int Label::pos() const {
// 10: short_data_record: [6-bit pc delta] 10 followed by
// [6-bit data delta] [2-bit data type tag]
//
-// 11: long_record [2-bit high tag][4 bit middle_tag] 11
-// followed by variable data depending on type.
+// 11: long_record [6 bit reloc mode] 11
+// followed by pc delta
+// followed by optional data depending on type.
//
// 2-bit data type tags, used in short_data_record and data_jump long_record:
// code_target_with_id: 00
// position: 01
// statement_position: 10
-// comment: 11 (not used in short_data_record)
-// deopt_reason: 11 (not used in long_data_record)
-//
-// Long record format:
-// 4-bit middle_tag:
-// 0000 - 1100 : Short record for RelocInfo::Mode middle_tag + 2
-// (The middle_tag encodes rmode - RelocInfo::LAST_COMPACT_ENUM,
-// and is between 0000 and 1100)
-// The format is:
-// 00 [4 bit middle_tag] 11 followed by
-// 00 [6 bit pc delta]
+// deopt_reason: 11
//
-// 1101: constant or veneer pool. Used only on ARM and ARM64 for now.
-// The format is: [2-bit sub-type] 1101 11
-// signed int (size of the pool).
-// The 2-bit sub-types are:
-// 00: constant pool
-// 01: veneer pool
-// 1110: long_data_record
-// The format is: [2-bit data_type_tag] 1110 11
-// signed intptr_t, lowest byte written first
-// (except data_type code_target_with_id, which
-// is followed by a signed int, not intptr_t.)
-//
-// 1111: long_pc_jump
-// The format is:
-// pc-jump: 00 1111 11,
-// 00 [6 bits pc delta]
-// or
-// pc-jump (variable length):
-// 01 1111 11,
+// If a pc delta exceeds 6 bits, it is split into a remainder that fits into
+// 6 bits and a part that does not. The latter is encoded as a long record
+// with PC_JUMP as pseudo reloc info mode. The former is encoded as part of
+// the following record in the usual way. The long pc jump record has variable
+// length:
+// pc-jump: [PC_JUMP] 11
// [7 bits data] 0
// ...
// [7 bits data] 1
@@ -294,51 +276,37 @@ int Label::pos() const {
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
-const int kExtraTagBits = 4;
-const int kLocatableTypeTagBits = 2;
-const int kSmallDataBits = kBitsPerByte - kLocatableTypeTagBits;
+const int kLongTagBits = 6;
+const int kShortDataTypeTagBits = 2;
+const int kShortDataBits = kBitsPerByte - kShortDataTypeTagBits;
const int kEmbeddedObjectTag = 0;
const int kCodeTargetTag = 1;
const int kLocatableTag = 2;
const int kDefaultTag = 3;
-const int kPCJumpExtraTag = (1 << kExtraTagBits) - 1;
-
const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask;
-const int kVariableLengthPCJumpTopTag = 1;
const int kChunkBits = 7;
const int kChunkMask = (1 << kChunkBits) - 1;
const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
-
-const int kDataJumpExtraTag = kPCJumpExtraTag - 1;
-
const int kCodeWithIdTag = 0;
const int kNonstatementPositionTag = 1;
const int kStatementPositionTag = 2;
-const int kCommentTag = 3;
-
-// Reuse the same value for deopt reason tag in short record format.
-// It is possible because we use kCommentTag only for the long record format.
const int kDeoptReasonTag = 3;
-const int kPoolExtraTag = kPCJumpExtraTag - 2;
-const int kConstPoolTag = 0;
-const int kVeneerPoolTag = 1;
-
-uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
+uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
// Return if the pc_delta can fit in kSmallPCDeltaBits bits.
// Otherwise write a variable length PC jump for the bits that do
// not fit in the kSmallPCDeltaBits bits.
if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
- WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag);
+ WriteMode(RelocInfo::PC_JUMP);
uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
DCHECK(pc_jump > 0);
// Write kChunkBits size chunks of the pc_jump.
@@ -353,55 +321,42 @@ uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
}
-void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
- // Write a byte of tagged pc-delta, possibly preceded by var. length pc-jump.
- pc_delta = WriteVariableLengthPCJump(pc_delta);
+void RelocInfoWriter::WriteShortTaggedPC(uint32_t pc_delta, int tag) {
+ // Write a byte of tagged pc-delta, possibly preceded by an explicit pc-jump.
+ pc_delta = WriteLongPCJump(pc_delta);
*--pos_ = pc_delta << kTagBits | tag;
}
-void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
- *--pos_ = static_cast<byte>(data_delta << kLocatableTypeTagBits | tag);
+void RelocInfoWriter::WriteShortTaggedData(intptr_t data_delta, int tag) {
+ *--pos_ = static_cast<byte>(data_delta << kShortDataTypeTagBits | tag);
}
-void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) {
- *--pos_ = static_cast<int>(top_tag << (kTagBits + kExtraTagBits) |
- extra_tag << kTagBits |
- kDefaultTag);
+void RelocInfoWriter::WriteMode(RelocInfo::Mode rmode) {
+ STATIC_ASSERT(RelocInfo::NUMBER_OF_MODES <= (1 << kLongTagBits));
+ *--pos_ = static_cast<int>((rmode << kTagBits) | kDefaultTag);
}
-void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
+void RelocInfoWriter::WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode) {
// Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
- pc_delta = WriteVariableLengthPCJump(pc_delta);
- WriteExtraTag(extra_tag, 0);
+ pc_delta = WriteLongPCJump(pc_delta);
+ WriteMode(rmode);
*--pos_ = pc_delta;
}
-void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
- WriteExtraTag(kDataJumpExtraTag, top_tag);
- for (int i = 0; i < kIntSize; i++) {
- *--pos_ = static_cast<byte>(data_delta);
- // Signed right shift is arithmetic shift. Tested in test-utils.cc.
- data_delta = data_delta >> kBitsPerByte;
- }
-}
-
-
-void RelocInfoWriter::WriteExtraTaggedPoolData(int data, int pool_type) {
- WriteExtraTag(kPoolExtraTag, pool_type);
+void RelocInfoWriter::WriteIntData(int number) {
for (int i = 0; i < kIntSize; i++) {
- *--pos_ = static_cast<byte>(data);
+ *--pos_ = static_cast<byte>(number);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
- data = data >> kBitsPerByte;
+ number = number >> kBitsPerByte;
}
}
-void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
- WriteExtraTag(kDataJumpExtraTag, top_tag);
+void RelocInfoWriter::WriteData(intptr_t data_delta) {
for (int i = 0; i < kIntptrSize; i++) {
*--pos_ = static_cast<byte>(data_delta);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
@@ -415,13 +370,13 @@ void RelocInfoWriter::WritePosition(int pc_delta, int pos_delta,
int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
: kStatementPositionTag;
// Check if delta is small enough to fit in a tagged byte.
- if (is_intn(pos_delta, kSmallDataBits)) {
- WriteTaggedPC(pc_delta, kLocatableTag);
- WriteTaggedData(pos_delta, pos_type_tag);
+ if (is_intn(pos_delta, kShortDataBits)) {
+ WriteShortTaggedPC(pc_delta, kLocatableTag);
+ WriteShortTaggedData(pos_delta, pos_type_tag);
} else {
// Otherwise, use costly encoding.
- WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
- WriteExtraTaggedIntData(pos_delta, pos_type_tag);
+ WriteModeAndPC(pc_delta, rmode);
+ WriteIntData(pos_delta);
}
}
@@ -452,28 +407,28 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
// The two most common modes are given small tags, and usually fit in a byte.
if (rmode == RelocInfo::EMBEDDED_OBJECT) {
- WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
+ WriteShortTaggedPC(pc_delta, kEmbeddedObjectTag);
} else if (rmode == RelocInfo::CODE_TARGET) {
- WriteTaggedPC(pc_delta, kCodeTargetTag);
+ WriteShortTaggedPC(pc_delta, kCodeTargetTag);
DCHECK(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
} else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
// Use signed delta-encoding for id.
DCHECK_EQ(static_cast<int>(rinfo->data()), rinfo->data());
int id_delta = static_cast<int>(rinfo->data()) - last_id_;
// Check if delta is small enough to fit in a tagged byte.
- if (is_intn(id_delta, kSmallDataBits)) {
- WriteTaggedPC(pc_delta, kLocatableTag);
- WriteTaggedData(id_delta, kCodeWithIdTag);
+ if (is_intn(id_delta, kShortDataBits)) {
+ WriteShortTaggedPC(pc_delta, kLocatableTag);
+ WriteShortTaggedData(id_delta, kCodeWithIdTag);
} else {
// Otherwise, use costly encoding.
- WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
- WriteExtraTaggedIntData(id_delta, kCodeWithIdTag);
+ WriteModeAndPC(pc_delta, rmode);
+ WriteIntData(id_delta);
}
last_id_ = static_cast<int>(rinfo->data());
} else if (rmode == RelocInfo::DEOPT_REASON) {
- DCHECK(rinfo->data() < (1 << kSmallDataBits));
- WriteTaggedPC(pc_delta, kLocatableTag);
- WriteTaggedData(rinfo->data(), kDeoptReasonTag);
+ DCHECK(rinfo->data() < (1 << kShortDataBits));
+ WriteShortTaggedPC(pc_delta, kLocatableTag);
+ WriteShortTaggedData(rinfo->data(), kDeoptReasonTag);
} else if (RelocInfo::IsPosition(rmode)) {
// Use signed delta-encoding for position.
DCHECK_EQ(static_cast<int>(rinfo->data()), rinfo->data());
@@ -492,27 +447,15 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
next_position_candidate_flushed_ = false;
}
last_position_ = static_cast<int>(rinfo->data());
- } else if (RelocInfo::IsComment(rmode)) {
- // Comments are normally not generated, so we use the costly encoding.
- WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
- WriteExtraTaggedData(rinfo->data(), kCommentTag);
- DCHECK(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
- } else if (RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)) {
- WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
- WriteExtraTaggedPoolData(static_cast<int>(rinfo->data()),
- RelocInfo::IsConstPool(rmode) ? kConstPoolTag
- : kVeneerPoolTag);
} else {
- DCHECK(rmode > RelocInfo::LAST_COMPACT_ENUM);
- DCHECK(rmode <= RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM);
- STATIC_ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM -
- RelocInfo::LAST_COMPACT_ENUM <=
- kPoolExtraTag);
- int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM - 1;
- // For all other modes we simply use the mode as the extra tag.
- // None of these modes need a data component.
- DCHECK(0 <= saved_mode && saved_mode < kPoolExtraTag);
- WriteExtraTaggedPC(pc_delta, saved_mode);
+ WriteModeAndPC(pc_delta, rmode);
+ if (RelocInfo::IsComment(rmode)) {
+ WriteData(rinfo->data());
+ } else if (RelocInfo::IsConstPool(rmode) ||
+ RelocInfo::IsVeneerPool(rmode) ||
+ RelocInfo::IsDebugBreakSlotAtCall(rmode)) {
+ WriteIntData(static_cast<int>(rinfo->data()));
+ }
}
last_pc_ = rinfo->pc();
last_mode_ = rmode;
@@ -527,17 +470,13 @@ inline int RelocIterator::AdvanceGetTag() {
}
-inline int RelocIterator::GetExtraTag() {
- return (*pos_ >> kTagBits) & ((1 << kExtraTagBits) - 1);
-}
-
-
-inline int RelocIterator::GetTopTag() {
- return *pos_ >> (kTagBits + kExtraTagBits);
+inline RelocInfo::Mode RelocIterator::GetMode() {
+ return static_cast<RelocInfo::Mode>((*pos_ >> kTagBits) &
+ ((1 << kLongTagBits) - 1));
}
-inline void RelocIterator::ReadTaggedPC() {
+inline void RelocIterator::ReadShortTaggedPC() {
rinfo_.pc_ += *pos_ >> kTagBits;
}
@@ -557,7 +496,7 @@ void RelocIterator::AdvanceReadId() {
}
-void RelocIterator::AdvanceReadPoolData() {
+void RelocIterator::AdvanceReadInt() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
@@ -585,7 +524,7 @@ void RelocIterator::AdvanceReadData() {
}
-void RelocIterator::AdvanceReadVariableLengthPCJump() {
+void RelocIterator::AdvanceReadLongPCJump() {
// Read the 32-kSmallPCDeltaBits most significant bits of the
// pc jump in kChunkBits bit chunks and shift them into place.
// Stop when the last chunk is encountered.
@@ -601,28 +540,28 @@ void RelocIterator::AdvanceReadVariableLengthPCJump() {
}
-inline int RelocIterator::GetLocatableTypeTag() {
- return *pos_ & ((1 << kLocatableTypeTagBits) - 1);
+inline int RelocIterator::GetShortDataTypeTag() {
+ return *pos_ & ((1 << kShortDataTypeTagBits) - 1);
}
-inline void RelocIterator::ReadTaggedId() {
+inline void RelocIterator::ReadShortTaggedId() {
int8_t signed_b = *pos_;
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
- last_id_ += signed_b >> kLocatableTypeTagBits;
+ last_id_ += signed_b >> kShortDataTypeTagBits;
rinfo_.data_ = last_id_;
}
-inline void RelocIterator::ReadTaggedPosition() {
+inline void RelocIterator::ReadShortTaggedPosition() {
int8_t signed_b = *pos_;
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
- last_position_ += signed_b >> kLocatableTypeTagBits;
+ last_position_ += signed_b >> kShortDataTypeTagBits;
rinfo_.data_ = last_position_;
}
-inline void RelocIterator::ReadTaggedData() {
+inline void RelocIterator::ReadShortTaggedData() {
uint8_t unsigned_b = *pos_;
rinfo_.data_ = unsigned_b >> kTagBits;
}
@@ -647,79 +586,74 @@ void RelocIterator::next() {
while (pos_ > end_) {
int tag = AdvanceGetTag();
if (tag == kEmbeddedObjectTag) {
- ReadTaggedPC();
+ ReadShortTaggedPC();
if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
} else if (tag == kCodeTargetTag) {
- ReadTaggedPC();
+ ReadShortTaggedPC();
if (SetMode(RelocInfo::CODE_TARGET)) return;
} else if (tag == kLocatableTag) {
- ReadTaggedPC();
+ ReadShortTaggedPC();
Advance();
- int locatable_tag = GetLocatableTypeTag();
- if (locatable_tag == kCodeWithIdTag) {
+ int data_type_tag = GetShortDataTypeTag();
+ if (data_type_tag == kCodeWithIdTag) {
if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
- ReadTaggedId();
+ ReadShortTaggedId();
+ return;
+ }
+ } else if (data_type_tag == kDeoptReasonTag) {
+ if (SetMode(RelocInfo::DEOPT_REASON)) {
+ ReadShortTaggedData();
return;
}
- } else if (locatable_tag == kDeoptReasonTag) {
- ReadTaggedData();
- if (SetMode(RelocInfo::DEOPT_REASON)) return;
} else {
- DCHECK(locatable_tag == kNonstatementPositionTag ||
- locatable_tag == kStatementPositionTag);
+ DCHECK(data_type_tag == kNonstatementPositionTag ||
+ data_type_tag == kStatementPositionTag);
if (mode_mask_ & RelocInfo::kPositionMask) {
- ReadTaggedPosition();
- if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
+ // Always update the position if we are interested in either
+ // statement positions or non-statement positions.
+ ReadShortTaggedPosition();
+ if (SetMode(GetPositionModeFromTag(data_type_tag))) return;
}
}
} else {
DCHECK(tag == kDefaultTag);
- int extra_tag = GetExtraTag();
- if (extra_tag == kPCJumpExtraTag) {
- if (GetTopTag() == kVariableLengthPCJumpTopTag) {
- AdvanceReadVariableLengthPCJump();
- } else {
- AdvanceReadPC();
- }
- } else if (extra_tag == kDataJumpExtraTag) {
- int locatable_tag = GetTopTag();
- if (locatable_tag == kCodeWithIdTag) {
- if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
+ RelocInfo::Mode rmode = GetMode();
+ if (rmode == RelocInfo::PC_JUMP) {
+ AdvanceReadLongPCJump();
+ } else {
+ AdvanceReadPC();
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ if (SetMode(rmode)) {
AdvanceReadId();
return;
}
Advance(kIntSize);
- } else if (locatable_tag != kCommentTag) {
- DCHECK(locatable_tag == kNonstatementPositionTag ||
- locatable_tag == kStatementPositionTag);
+ } else if (RelocInfo::IsComment(rmode)) {
+ if (SetMode(rmode)) {
+ AdvanceReadData();
+ return;
+ }
+ Advance(kIntptrSize);
+ } else if (RelocInfo::IsPosition(rmode)) {
if (mode_mask_ & RelocInfo::kPositionMask) {
+ // Always update the position if we are interested in either
+ // statement positions or non-statement positions.
AdvanceReadPosition();
- if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
+ if (SetMode(rmode)) return;
} else {
Advance(kIntSize);
}
- } else {
- DCHECK(locatable_tag == kCommentTag);
- if (SetMode(RelocInfo::COMMENT)) {
- AdvanceReadData();
+ } else if (RelocInfo::IsConstPool(rmode) ||
+ RelocInfo::IsVeneerPool(rmode) ||
+ RelocInfo::IsDebugBreakSlotAtCall(rmode)) {
+ if (SetMode(rmode)) {
+ AdvanceReadInt();
return;
}
- Advance(kIntptrSize);
- }
- } else if (extra_tag == kPoolExtraTag) {
- int pool_type = GetTopTag();
- DCHECK(pool_type == kConstPoolTag || pool_type == kVeneerPoolTag);
- RelocInfo::Mode rmode = (pool_type == kConstPoolTag) ?
- RelocInfo::CONST_POOL : RelocInfo::VENEER_POOL;
- if (SetMode(rmode)) {
- AdvanceReadPoolData();
+ Advance(kIntSize);
+ } else if (SetMode(static_cast<RelocInfo::Mode>(rmode))) {
return;
}
- Advance(kIntSize);
- } else {
- AdvanceReadPC();
- int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM + 1;
- if (SetMode(static_cast<RelocInfo::Mode>(rmode))) return;
}
}
}
@@ -799,49 +733,56 @@ bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
- case RelocInfo::NONE32:
+ case NONE32:
return "no reloc 32";
- case RelocInfo::NONE64:
+ case NONE64:
return "no reloc 64";
- case RelocInfo::EMBEDDED_OBJECT:
+ case EMBEDDED_OBJECT:
return "embedded object";
- case RelocInfo::CONSTRUCT_CALL:
+ case CONSTRUCT_CALL:
return "code target (js construct call)";
- case RelocInfo::DEBUG_BREAK:
- return "debug break";
- case RelocInfo::CODE_TARGET:
+ case DEBUGGER_STATEMENT:
+ return "debugger statement";
+ case CODE_TARGET:
return "code target";
- case RelocInfo::CODE_TARGET_WITH_ID:
+ case CODE_TARGET_WITH_ID:
return "code target with id";
- case RelocInfo::CELL:
+ case CELL:
return "property cell";
- case RelocInfo::RUNTIME_ENTRY:
+ case RUNTIME_ENTRY:
return "runtime entry";
- case RelocInfo::JS_RETURN:
- return "js return";
- case RelocInfo::COMMENT:
+ case COMMENT:
return "comment";
- case RelocInfo::POSITION:
+ case POSITION:
return "position";
- case RelocInfo::STATEMENT_POSITION:
+ case STATEMENT_POSITION:
return "statement position";
- case RelocInfo::EXTERNAL_REFERENCE:
+ case EXTERNAL_REFERENCE:
return "external reference";
- case RelocInfo::INTERNAL_REFERENCE:
+ case INTERNAL_REFERENCE:
return "internal reference";
- case RelocInfo::INTERNAL_REFERENCE_ENCODED:
+ case INTERNAL_REFERENCE_ENCODED:
return "encoded internal reference";
- case RelocInfo::DEOPT_REASON:
+ case DEOPT_REASON:
return "deopt reason";
- case RelocInfo::CONST_POOL:
+ case CONST_POOL:
return "constant pool";
- case RelocInfo::VENEER_POOL:
+ case VENEER_POOL:
return "veneer pool";
- case RelocInfo::DEBUG_BREAK_SLOT:
- return "debug break slot";
- case RelocInfo::CODE_AGE_SEQUENCE:
- return "code_age_sequence";
- case RelocInfo::NUMBER_OF_MODES:
+ case DEBUG_BREAK_SLOT_AT_POSITION:
+ return "debug break slot at position";
+ case DEBUG_BREAK_SLOT_AT_RETURN:
+ return "debug break slot at return";
+ case DEBUG_BREAK_SLOT_AT_CALL:
+ return "debug break slot at call";
+ case DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL:
+ return "debug break slot at construct call";
+ case CODE_AGE_SEQUENCE:
+ return "code age sequence";
+ case GENERATOR_CONTINUATION:
+ return "generator continuation";
+ case NUMBER_OF_MODES:
+ case PC_JUMP:
UNREACHABLE();
return "number_of_modes";
}
@@ -899,7 +840,7 @@ void RelocInfo::Verify(Isolate* isolate) {
case CELL:
Object::VerifyPointer(target_cell());
break;
- case DEBUG_BREAK:
+ case DEBUGGER_STATEMENT:
case CONSTRUCT_CALL:
case CODE_TARGET_WITH_ID:
case CODE_TARGET: {
@@ -923,7 +864,6 @@ void RelocInfo::Verify(Isolate* isolate) {
break;
}
case RUNTIME_ENTRY:
- case JS_RETURN:
case COMMENT:
case POSITION:
case STATEMENT_POSITION:
@@ -931,11 +871,16 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEOPT_REASON:
case CONST_POOL:
case VENEER_POOL:
- case DEBUG_BREAK_SLOT:
+ case DEBUG_BREAK_SLOT_AT_POSITION:
+ case DEBUG_BREAK_SLOT_AT_RETURN:
+ case DEBUG_BREAK_SLOT_AT_CALL:
+ case DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL:
+ case GENERATOR_CONTINUATION:
case NONE32:
case NONE64:
break;
case NUMBER_OF_MODES:
+ case PC_JUMP:
UNREACHABLE();
break;
case CODE_AGE_SEQUENCE:
@@ -946,6 +891,11 @@ void RelocInfo::Verify(Isolate* isolate) {
#endif // VERIFY_HEAP
+int RelocInfo::DebugBreakCallArgumentsCount(intptr_t data) {
+ return static_cast<int>(data);
+}
+
+
// -----------------------------------------------------------------------------
// Implementation of ExternalReference
@@ -1045,11 +995,6 @@ ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
}
-ExternalReference::ExternalReference(const IC_Utility& ic_utility,
- Isolate* isolate)
- : address_(Redirect(isolate, ic_utility.address())) {}
-
-
ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
@@ -1551,17 +1496,18 @@ ExternalReference ExternalReference::mod_two_doubles_operation(
}
-ExternalReference ExternalReference::debug_break(Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));
-}
-
-
ExternalReference ExternalReference::debug_step_in_fp_address(
Isolate* isolate) {
return ExternalReference(isolate->debug()->step_in_fp_addr());
}
+ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
+ return ExternalReference(reinterpret_cast<void*>(
+ FixedTypedArrayBase::kDataOffset - kHeapObjectTag));
+}
+
+
bool operator==(ExternalReference lhs, ExternalReference rhs) {
return lhs.address() == rhs.address();
}
@@ -1860,16 +1806,17 @@ void Assembler::RecordComment(const char* msg) {
}
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
+void Assembler::RecordGeneratorContinuation() {
EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::JS_RETURN);
+ RecordRelocInfo(RelocInfo::GENERATOR_CONTINUATION);
}
-void Assembler::RecordDebugBreakSlot() {
+void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode, int call_argc) {
EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+ DCHECK(RelocInfo::IsDebugBreakSlot(mode));
+ intptr_t data = static_cast<intptr_t>(call_argc);
+ RecordRelocInfo(mode, data);
}
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index fb59ceb7bd..a4c19e6b02 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -35,22 +35,22 @@
#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_
-#include "src/v8.h"
-
#include "src/allocation.h"
#include "src/builtins.h"
-#include "src/gdb-jit.h"
#include "src/isolate.h"
#include "src/runtime/runtime.h"
#include "src/token.h"
namespace v8 {
+// Forward declarations.
class ApiFunction;
namespace internal {
+// Forward declarations.
class StatsCounter;
+
// -----------------------------------------------------------------------------
// Platform independent assembler base class.
@@ -158,8 +158,10 @@ class DontEmitDebugCodeScope BASE_EMBEDDED {
// snapshot and the running VM.
class PredictableCodeSizeScope {
public:
+ explicit PredictableCodeSizeScope(AssemblerBase* assembler);
PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
~PredictableCodeSizeScope();
+ void ExpectSize(int expected_size) { expected_size_ = expected_size; }
private:
AssemblerBase* assembler_;
@@ -349,10 +351,9 @@ class RelocInfo {
// we do not normally record relocation info.
static const char* const kFillerCommentString;
- // The minimum size of a comment is equal to three bytes for the extra tagged
- // pc + the tag for the data, and kPointerSize for the actual pointer to the
- // comment.
- static const int kMinRelocCommentSize = 3 + kPointerSize;
+ // The minimum size of a comment is equal to two bytes for the extra tagged
+ // pc and kPointerSize for the actual pointer to the comment.
+ static const int kMinRelocCommentSize = 2 + kPointerSize;
// The maximum size for a call instruction including pc-jump.
static const int kMaxCallSize = 6;
@@ -365,23 +366,31 @@ class RelocInfo {
CODE_TARGET, // Code target which is not any of the above.
CODE_TARGET_WITH_ID,
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
- DEBUG_BREAK, // Code target for the debugger statement.
+ DEBUGGER_STATEMENT, // Code target for the debugger statement.
EMBEDDED_OBJECT,
CELL,
// Everything after runtime_entry (inclusive) is not GC'ed.
RUNTIME_ENTRY,
- JS_RETURN, // Marks start of the ExitJSFrame code.
COMMENT,
POSITION, // See comment for kNoPosition above.
STATEMENT_POSITION, // See comment for kNoPosition above.
- DEBUG_BREAK_SLOT, // Additional code inserted for debug break slot.
+
+ // Additional code inserted for debug break slot.
+ DEBUG_BREAK_SLOT_AT_POSITION,
+ DEBUG_BREAK_SLOT_AT_RETURN,
+ DEBUG_BREAK_SLOT_AT_CALL,
+ DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL,
+
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
// Encoded internal reference, used only on MIPS, MIPS64 and PPC.
INTERNAL_REFERENCE_ENCODED,
+ // Continuation points for a generator yield.
+ GENERATOR_CONTINUATION,
+
// Marks constant and veneer pools. Only used on ARM and ARM64.
// They use a custom noncompact encoding.
CONST_POOL,
@@ -389,9 +398,12 @@ class RelocInfo {
DEOPT_REASON, // Deoptimization reason index.
- // add more as needed
+ // This is not an actual reloc mode, but used to encode a long pc jump that
+ // cannot be encoded as part of another record.
+ PC_JUMP,
+
// Pseudo-types
- NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding.
+ NUMBER_OF_MODES,
NONE32, // never recorded 32-bit value
NONE64, // never recorded 64-bit value
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explictly by
@@ -399,15 +411,12 @@ class RelocInfo {
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
- FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
- LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
- LAST_CODE_ENUM = DEBUG_BREAK,
+ LAST_CODE_ENUM = DEBUGGER_STATEMENT,
LAST_GCED_ENUM = CELL,
- // Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
- LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID,
- LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE_ENCODED
};
+ STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
+
RelocInfo() {}
RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
@@ -418,11 +427,6 @@ class RelocInfo {
return mode >= FIRST_REAL_RELOC_MODE &&
mode <= LAST_REAL_RELOC_MODE;
}
- static inline bool IsPseudoRelocMode(Mode mode) {
- DCHECK(!IsRealRelocMode(mode));
- return mode >= FIRST_PSEUDO_RELOC_MODE &&
- mode <= LAST_PSEUDO_RELOC_MODE;
- }
static inline bool IsConstructCall(Mode mode) {
return mode == CONSTRUCT_CALL;
}
@@ -440,9 +444,6 @@ class RelocInfo {
static inline bool IsGCRelocMode(Mode mode) {
return mode <= LAST_GCED_ENUM;
}
- static inline bool IsJSReturn(Mode mode) {
- return mode == JS_RETURN;
- }
static inline bool IsComment(Mode mode) {
return mode == COMMENT;
}
@@ -471,10 +472,24 @@ class RelocInfo {
return mode == INTERNAL_REFERENCE_ENCODED;
}
static inline bool IsDebugBreakSlot(Mode mode) {
- return mode == DEBUG_BREAK_SLOT;
+ return IsDebugBreakSlotAtPosition(mode) || IsDebugBreakSlotAtReturn(mode) ||
+ IsDebugBreakSlotAtCall(mode) ||
+ IsDebugBreakSlotAtConstructCall(mode);
+ }
+ static inline bool IsDebugBreakSlotAtPosition(Mode mode) {
+ return mode == DEBUG_BREAK_SLOT_AT_POSITION;
+ }
+ static inline bool IsDebugBreakSlotAtReturn(Mode mode) {
+ return mode == DEBUG_BREAK_SLOT_AT_RETURN;
+ }
+ static inline bool IsDebugBreakSlotAtCall(Mode mode) {
+ return mode == DEBUG_BREAK_SLOT_AT_CALL;
+ }
+ static inline bool IsDebugBreakSlotAtConstructCall(Mode mode) {
+ return mode == DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL;
}
static inline bool IsDebuggerStatement(Mode mode) {
- return mode == DEBUG_BREAK;
+ return mode == DEBUGGER_STATEMENT;
}
static inline bool IsNone(Mode mode) {
return mode == NONE32 || mode == NONE64;
@@ -482,6 +497,9 @@ class RelocInfo {
static inline bool IsCodeAgeSequence(Mode mode) {
return mode == CODE_AGE_SEQUENCE;
}
+ static inline bool IsGeneratorContinuation(Mode mode) {
+ return mode == GENERATOR_CONTINUATION;
+ }
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
@@ -492,10 +510,11 @@ class RelocInfo {
Code* host() const { return host_; }
void set_host(Code* host) { host_ = host; }
- // Apply a relocation by delta bytes
- INLINE(void apply(intptr_t delta,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED));
+ // Apply a relocation by delta bytes. When the code object is moved, PC
+ // relative addresses have to be updated as well as absolute addresses
+ // inside the code (internal references).
+ // Do not forget to flush the icache afterwards!
+ INLINE(void apply(intptr_t delta));
// Is the pointer this relocation info refers to coded like a plain pointer
// or is it strange in some way (e.g. relative or patched into a series of
@@ -506,6 +525,8 @@ class RelocInfo {
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
+ static int DebugBreakCallArgumentsCount(intptr_t data);
+
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -579,11 +600,8 @@ class RelocInfo {
// Read/modify the address of a call instruction. This is used to relocate
// the break points where straight-line code is patched with a call
// instruction.
- INLINE(Address call_address());
- INLINE(void set_call_address(Address target));
- INLINE(Object* call_object());
- INLINE(void set_call_object(Object* target));
- INLINE(Object** call_object_address());
+ INLINE(Address debug_call_address());
+ INLINE(void set_debug_call_address(Address target));
// Wipe out a relocation to a fixed value, used for making snapshots
// reproducible.
@@ -622,7 +640,10 @@ class RelocInfo {
static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
static const int kDataMask =
(1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT);
- static const int kApplyMask; // Modes affected by apply. Depends on arch.
+ static const int kDebugBreakSlotMask =
+ 1 << DEBUG_BREAK_SLOT_AT_POSITION | 1 << DEBUG_BREAK_SLOT_AT_RETURN |
+ 1 << DEBUG_BREAK_SLOT_AT_CALL | 1 << DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL;
+ static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
// On ARM, note that pc_ is the address of the constant pool entry
@@ -680,21 +701,22 @@ class RelocInfoWriter BASE_EMBEDDED {
void Finish() { FlushPosition(); }
// Max size (bytes) of a written RelocInfo. Longest encoding is
- // ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, ExtraTag, data_delta.
- // On ia32 and arm this is 1 + 4 + 1 + 1 + 1 + 4 = 12.
- // On x64 this is 1 + 4 + 1 + 1 + 1 + 8 == 16;
+ // ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, data_delta.
+ // On ia32 and arm this is 1 + 4 + 1 + 1 + 4 = 11.
+ // On x64 this is 1 + 4 + 1 + 1 + 8 == 15;
// Here we use the maximum of the two.
- static const int kMaxSize = 16;
+ static const int kMaxSize = 15;
private:
- inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
- inline void WriteTaggedPC(uint32_t pc_delta, int tag);
- inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
- inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
- inline void WriteExtraTaggedPoolData(int data, int pool_type);
- inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
- inline void WriteTaggedData(intptr_t data_delta, int tag);
- inline void WriteExtraTag(int extra_tag, int top_tag);
+ inline uint32_t WriteLongPCJump(uint32_t pc_delta);
+
+ inline void WriteShortTaggedPC(uint32_t pc_delta, int tag);
+ inline void WriteShortTaggedData(intptr_t data_delta, int tag);
+
+ inline void WriteMode(RelocInfo::Mode rmode);
+ inline void WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode);
+ inline void WriteIntData(int data_delta);
+ inline void WriteData(intptr_t data_delta);
inline void WritePosition(int pc_delta, int pos_delta, RelocInfo::Mode rmode);
void FlushPosition();
@@ -745,19 +767,21 @@ class RelocIterator: public Malloced {
// *Get* just reads and returns info on current byte.
void Advance(int bytes = 1) { pos_ -= bytes; }
int AdvanceGetTag();
- int GetExtraTag();
- int GetTopTag();
- void ReadTaggedPC();
+ RelocInfo::Mode GetMode();
+
+ void AdvanceReadLongPCJump();
+
+ int GetShortDataTypeTag();
+ void ReadShortTaggedPC();
+ void ReadShortTaggedId();
+ void ReadShortTaggedPosition();
+ void ReadShortTaggedData();
+
void AdvanceReadPC();
void AdvanceReadId();
- void AdvanceReadPoolData();
+ void AdvanceReadInt();
void AdvanceReadPosition();
void AdvanceReadData();
- void AdvanceReadVariableLengthPCJump();
- int GetLocatableTypeTag();
- void ReadTaggedId();
- void ReadTaggedPosition();
- void ReadTaggedData();
// If the given mode is wanted, set it in rinfo_ and return true.
// Else return false. Used for efficiently skipping unwanted modes.
@@ -781,7 +805,6 @@ class RelocIterator: public Malloced {
// External function
//----------------------------------------------------------------------------
-class IC_Utility;
class SCTableReference;
class Debug_Address;
@@ -851,8 +874,6 @@ class ExternalReference BASE_EMBEDDED {
ExternalReference(const Runtime::Function* f, Isolate* isolate);
- ExternalReference(const IC_Utility& ic_utility, Isolate* isolate);
-
explicit ExternalReference(StatsCounter* counter);
ExternalReference(Isolate::AddressId id, Isolate* isolate);
@@ -967,9 +988,6 @@ class ExternalReference BASE_EMBEDDED {
Address address() const { return reinterpret_cast<Address>(address_); }
- // Function Debug::Break()
- static ExternalReference debug_break(Isolate* isolate);
-
// Used to check if single stepping is enabled in generated code.
static ExternalReference debug_step_in_fp_address(Isolate* isolate);
@@ -1002,6 +1020,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference stress_deopt_count(Isolate* isolate);
+ static ExternalReference fixed_typed_array_base_data_offset();
+
private:
explicit ExternalReference(void* address)
: address_(address) {}
diff --git a/deps/v8/src/assert-scope.cc b/deps/v8/src/assert-scope.cc
index 3b91cf4389..6cc2e5a081 100644
--- a/deps/v8/src/assert-scope.cc
+++ b/deps/v8/src/assert-scope.cc
@@ -6,7 +6,7 @@
#include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/isolate.h"
#include "src/utils.h"
diff --git a/deps/v8/src/ast-literal-reindexer.cc b/deps/v8/src/ast-literal-reindexer.cc
index 50729be251..860a3961f0 100644
--- a/deps/v8/src/ast-literal-reindexer.cc
+++ b/deps/v8/src/ast-literal-reindexer.cc
@@ -1,10 +1,10 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-#include "src/ast.h"
#include "src/ast-literal-reindexer.h"
+
+#include "src/ast.h"
#include "src/scopes.h"
namespace v8 {
diff --git a/deps/v8/src/ast-literal-reindexer.h b/deps/v8/src/ast-literal-reindexer.h
index 59b214fecd..9e445129a5 100644
--- a/deps/v8/src/ast-literal-reindexer.h
+++ b/deps/v8/src/ast-literal-reindexer.h
@@ -5,8 +5,6 @@
#ifndef V8_AST_LITERAL_REINDEXER
#define V8_AST_LITERAL_REINDEXER
-#include "src/v8.h"
-
#include "src/ast.h"
#include "src/scopes.h"
diff --git a/deps/v8/src/ast-numbering.cc b/deps/v8/src/ast-numbering.cc
index 151cc8abc1..dc0528caa0 100644
--- a/deps/v8/src/ast-numbering.cc
+++ b/deps/v8/src/ast-numbering.cc
@@ -2,19 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/ast-numbering.h"
#include "src/ast.h"
-#include "src/ast-numbering.h"
#include "src/scopes.h"
namespace v8 {
namespace internal {
-
class AstNumberingVisitor final : public AstVisitor {
public:
- explicit AstNumberingVisitor(Isolate* isolate, Zone* zone)
+ AstNumberingVisitor(Isolate* isolate, Zone* zone)
: AstVisitor(),
next_id_(BailoutId::FirstUsable().ToInt()),
properties_(zone),
@@ -33,6 +31,10 @@ class AstNumberingVisitor final : public AstVisitor {
bool Finish(FunctionLiteral* node);
+ void VisitVariableProxyReference(VariableProxy* node);
+ void VisitPropertyReference(Property* node);
+ void VisitReference(Expression* expr);
+
void VisitStatements(ZoneList<Statement*>* statements) override;
void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
void VisitArguments(ZoneList<Expression*>* arguments);
@@ -46,7 +48,7 @@ class AstNumberingVisitor final : public AstVisitor {
void IncrementNodeCount() { properties_.add_node_count(1); }
void DisableSelfOptimization() {
- properties_.flags()->Add(kDontSelfOptimize);
+ properties_.flags() |= AstProperties::kDontSelfOptimize;
}
void DisableOptimization(BailoutReason reason) {
dont_optimize_reason_ = reason;
@@ -54,10 +56,11 @@ class AstNumberingVisitor final : public AstVisitor {
}
void DisableCrankshaft(BailoutReason reason) {
if (FLAG_turbo_shipping) {
- return properties_.flags()->Add(kDontCrankshaft);
+ properties_.flags() |= AstProperties::kDontCrankshaft;
+ } else {
+ dont_optimize_reason_ = reason;
+ DisableSelfOptimization();
}
- dont_optimize_reason_ = reason;
- DisableSelfOptimization();
}
template <typename Node>
@@ -147,16 +150,21 @@ void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
}
-void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
+void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
IncrementNodeCount();
if (node->var()->IsLookupSlot()) {
DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
}
- ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
}
+void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
+ VisitVariableProxyReference(node);
+ ReserveFeedbackSlots(node);
+}
+
+
void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(ThisFunction::num_ids()));
@@ -306,20 +314,35 @@ void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
}
-void AstNumberingVisitor::VisitProperty(Property* node) {
+void AstNumberingVisitor::VisitPropertyReference(Property* node) {
IncrementNodeCount();
- ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(Property::num_ids()));
Visit(node->key());
Visit(node->obj());
}
+void AstNumberingVisitor::VisitReference(Expression* expr) {
+ DCHECK(expr->IsProperty() || expr->IsVariableProxy());
+ if (expr->IsProperty()) {
+ VisitPropertyReference(expr->AsProperty());
+ } else {
+ VisitVariableProxyReference(expr->AsVariableProxy());
+ }
+}
+
+
+void AstNumberingVisitor::VisitProperty(Property* node) {
+ VisitPropertyReference(node);
+ ReserveFeedbackSlots(node);
+}
+
+
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Assignment::num_ids()));
if (node->is_compound()) VisitBinaryOperation(node->binary_operation());
- Visit(node->target());
+ VisitReference(node->target());
Visit(node->value());
ReserveFeedbackSlots(node);
}
@@ -540,10 +563,6 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
}
VisitDeclarations(scope->declarations());
- if (scope->is_function_scope() && scope->function() != NULL) {
- // Visit the name of the named function expression.
- Visit(scope->function());
- }
VisitStatements(node->body());
return Finish(node);
diff --git a/deps/v8/src/ast-numbering.h b/deps/v8/src/ast-numbering.h
index c068c2f286..57c750cf64 100644
--- a/deps/v8/src/ast-numbering.h
+++ b/deps/v8/src/ast-numbering.h
@@ -8,12 +8,18 @@
namespace v8 {
namespace internal {
+// Forward declarations.
+class FunctionLiteral;
+class Isolate;
+class Zone;
+
namespace AstNumbering {
// Assign type feedback IDs and bailout IDs to an AST node tree.
//
bool Renumber(Isolate* isolate, Zone* zone, FunctionLiteral* function);
}
-}
-} // namespace v8::internal
+
+} // namespace internal
+} // namespace v8
#endif // V8_AST_NUMBERING_H_
diff --git a/deps/v8/src/ast-value-factory.h b/deps/v8/src/ast-value-factory.h
index 2fee0396fd..ca36ac8ea1 100644
--- a/deps/v8/src/ast-value-factory.h
+++ b/deps/v8/src/ast-value-factory.h
@@ -267,7 +267,7 @@ class AstValue : public ZoneObject {
F(make_syntax_error, "MakeSyntaxError") \
F(make_type_error, "MakeTypeError") \
F(native, "native") \
- F(new_target, "new.target") \
+ F(new_target, ".new.target") \
F(next, "next") \
F(proto, "__proto__") \
F(prototype, "prototype") \
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index ec74e4afaf..71a34b65fa 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -59,6 +59,12 @@ bool Expression::IsUndefinedLiteral(Isolate* isolate) const {
}
+bool Expression::IsValidReferenceExpressionOrThis() const {
+ return IsValidReferenceExpression() ||
+ (IsVariableProxy() && AsVariableProxy()->is_this());
+}
+
+
VariableProxy::VariableProxy(Zone* zone, Variable* var, int start_position,
int end_position)
: Expression(zone, start_position),
@@ -95,7 +101,7 @@ void VariableProxy::BindTo(Variable* var) {
void VariableProxy::SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
ICSlotCache* cache) {
variable_feedback_slot_ = slot;
- if (var()->IsUnallocatedOrGlobalSlot()) {
+ if (var()->IsUnallocated()) {
cache->Put(var(), slot);
}
}
@@ -106,7 +112,7 @@ FeedbackVectorRequirements VariableProxy::ComputeFeedbackRequirements(
if (UsesVariableFeedbackSlot()) {
// VariableProxies that point to the same Variable within a function can
// make their loads from the same IC slot.
- if (var()->IsUnallocatedOrGlobalSlot()) {
+ if (var()->IsUnallocated()) {
ZoneHashMap::Entry* entry = cache->Get(var());
if (entry != NULL) {
variable_feedback_slot_ = FeedbackVectorICSlot(
@@ -126,7 +132,7 @@ static int GetStoreICSlots(Expression* expr) {
Property* property = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
if ((assign_type == VARIABLE &&
- expr->AsVariableProxy()->var()->IsUnallocatedOrGlobalSlot()) ||
+ expr->AsVariableProxy()->var()->IsUnallocated()) ||
assign_type == NAMED_PROPERTY || assign_type == KEYED_PROPERTY) {
ic_slots++;
}
@@ -288,8 +294,7 @@ FeedbackVectorRequirements ClassLiteral::ComputeFeedbackRequirements(
if (FunctionLiteral::NeedsHomeObject(value)) ic_slots++;
}
- if (scope() != NULL &&
- class_variable_proxy()->var()->IsUnallocatedOrGlobalSlot()) {
+ if (scope() != NULL && class_variable_proxy()->var()->IsUnallocated()) {
ic_slots++;
}
@@ -506,19 +511,22 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
if (!constant_elements_.is_null()) return;
+ int constants_length =
+ first_spread_index_ >= 0 ? first_spread_index_ : values()->length();
+
// Allocate a fixed array to hold all the object literals.
Handle<JSArray> array = isolate->factory()->NewJSArray(
- FAST_HOLEY_SMI_ELEMENTS, values()->length(), values()->length(),
+ FAST_HOLEY_SMI_ELEMENTS, constants_length, constants_length,
Strength::WEAK, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
// Fill in the literals.
- bool is_simple = true;
+ bool is_simple = (first_spread_index_ < 0);
int depth_acc = 1;
bool is_holey = false;
int array_index = 0;
- for (int n = values()->length(); array_index < n; array_index++) {
+ for (; array_index < constants_length; array_index++) {
Expression* element = values()->at(array_index);
- if (element->IsSpread()) break;
+ DCHECK(!element->IsSpread());
MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
if (m_literal != NULL) {
m_literal->BuildConstants(isolate);
@@ -544,9 +552,6 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
.Assert();
}
- if (array_index != values()->length()) {
- JSArray::SetLength(array, array_index);
- }
JSObject::ValidateElements(array);
Handle<FixedArrayBase> element_values(array->elements());
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 115c59ff80..1366041387 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -5,19 +5,18 @@
#ifndef V8_AST_H_
#define V8_AST_H_
-#include "src/v8.h"
-
#include "src/assembler.h"
#include "src/ast-value-factory.h"
#include "src/bailout-reason.h"
+#include "src/base/flags.h"
+#include "src/base/smart-pointers.h"
#include "src/factory.h"
#include "src/isolate.h"
-#include "src/jsregexp.h"
-#include "src/list-inl.h"
+#include "src/list.h"
#include "src/modules.h"
+#include "src/regexp/jsregexp.h"
#include "src/runtime/runtime.h"
#include "src/small-pointer-list.h"
-#include "src/smart-pointers.h"
#include "src/token.h"
#include "src/types.h"
#include "src/utils.h"
@@ -137,9 +136,6 @@ typedef ZoneList<Handle<Object>> ZoneObjectList;
friend class AstNodeFactory;
-enum AstPropertiesFlag { kDontSelfOptimize, kDontCrankshaft };
-
-
class FeedbackVectorRequirements {
public:
FeedbackVectorRequirements(int slots, int ic_slots)
@@ -179,11 +175,18 @@ class ICSlotCache {
class AstProperties final BASE_EMBEDDED {
public:
- class Flags : public EnumSet<AstPropertiesFlag, int> {};
+ enum Flag {
+ kNoFlags = 0,
+ kDontSelfOptimize = 1 << 0,
+ kDontCrankshaft = 1 << 1
+ };
+
+ typedef base::Flags<Flag> Flags;
explicit AstProperties(Zone* zone) : node_count_(0), spec_(zone) {}
- Flags* flags() { return &flags_; }
+ Flags& flags() { return flags_; }
+ Flags flags() const { return flags_; }
int node_count() { return node_count_; }
void add_node_count(int count) { node_count_ += count; }
@@ -201,6 +204,8 @@ class AstProperties final BASE_EMBEDDED {
ZoneFeedbackVectorSpec spec_;
};
+DEFINE_OPERATORS_FOR_FLAGS(AstProperties::Flags)
+
class AstNode: public ZoneObject {
public:
@@ -336,6 +341,7 @@ class Expression : public AstNode {
kTest
};
+ // True iff the expression is a valid reference expression.
virtual bool IsValidReferenceExpression() const { return false; }
// Helpers for ToBoolean conversion.
@@ -359,6 +365,9 @@ class Expression : public AstNode {
// True if we can prove that the expression is the undefined literal.
bool IsUndefinedLiteral(Isolate* isolate) const;
+ // True iff the expression is a valid target for an assignment.
+ bool IsValidReferenceExpressionOrThis() const;
+
// Expression type bounds
Bounds bounds() const { return bounds_; }
void set_bounds(Bounds bounds) { bounds_ = bounds; }
@@ -1609,10 +1618,12 @@ class ArrayLiteral final : public MaterializedLiteral {
};
protected:
- ArrayLiteral(Zone* zone, ZoneList<Expression*>* values, int literal_index,
- bool is_strong, int pos)
+ ArrayLiteral(Zone* zone, ZoneList<Expression*>* values,
+ int first_spread_index, int literal_index, bool is_strong,
+ int pos)
: MaterializedLiteral(zone, literal_index, is_strong, pos),
- values_(values) {}
+ values_(values),
+ first_spread_index_(first_spread_index) {}
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
private:
@@ -1620,6 +1631,7 @@ class ArrayLiteral final : public MaterializedLiteral {
Handle<FixedArray> constant_elements_;
ZoneList<Expression*>* values_;
+ int first_spread_index_;
};
@@ -1627,7 +1639,9 @@ class VariableProxy final : public Expression {
public:
DECLARE_NODE_TYPE(VariableProxy)
- bool IsValidReferenceExpression() const override { return !is_this(); }
+ bool IsValidReferenceExpression() const override {
+ return !is_this() && !is_new_target();
+ }
bool IsArguments() const { return is_resolved() && var()->is_arguments(); }
@@ -1658,13 +1672,18 @@ class VariableProxy final : public Expression {
bit_field_ = IsResolvedField::update(bit_field_, true);
}
+ bool is_new_target() const { return IsNewTargetField::decode(bit_field_); }
+ void set_is_new_target() {
+ bit_field_ = IsNewTargetField::update(bit_field_, true);
+ }
+
int end_position() const { return end_position_; }
// Bind this proxy to the variable var.
void BindTo(Variable* var);
bool UsesVariableFeedbackSlot() const {
- return var()->IsUnallocatedOrGlobalSlot() || var()->IsLookupSlot();
+ return var()->IsUnallocated() || var()->IsLookupSlot();
}
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
@@ -1674,7 +1693,6 @@ class VariableProxy final : public Expression {
ICSlotCache* cache) override;
Code::Kind FeedbackICSlotKind(int index) override { return Code::LOAD_IC; }
FeedbackVectorICSlot VariableFeedbackSlot() {
- DCHECK(!UsesVariableFeedbackSlot() || !variable_feedback_slot_.IsInvalid());
return variable_feedback_slot_;
}
@@ -1694,6 +1712,7 @@ class VariableProxy final : public Expression {
class IsThisField : public BitField8<bool, 0, 1> {};
class IsAssignedField : public BitField8<bool, 1, 1> {};
class IsResolvedField : public BitField8<bool, 2, 1> {};
+ class IsNewTargetField : public BitField8<bool, 3, 1> {};
// Start with 16-bit (or smaller) field, which should get packed together
// with Expression's trailing 16-bit field.
@@ -1782,7 +1801,6 @@ class Property final : public Expression {
}
FeedbackVectorICSlot PropertyFeedbackSlot() const {
- DCHECK(!property_feedback_slot_.IsInvalid());
return property_feedback_slot_;
}
@@ -2591,7 +2609,7 @@ class FunctionLiteral final : public Expression {
FunctionKind kind() const { return FunctionKindBits::decode(bitfield_); }
int ast_node_count() { return ast_properties_.node_count(); }
- AstProperties::Flags* flags() { return ast_properties_.flags(); }
+ AstProperties::Flags flags() const { return ast_properties_.flags(); }
void set_ast_properties(AstProperties* ast_properties) {
ast_properties_ = *ast_properties;
}
@@ -2819,7 +2837,7 @@ class SuperCallReference final : public Expression {
new_target_var_(new_target_var),
this_function_var_(this_function_var) {
DCHECK(this_var->is_this());
- DCHECK(new_target_var->raw_name()->IsOneByteEqualTo("new.target"));
+ DCHECK(new_target_var->raw_name()->IsOneByteEqualTo(".new.target"));
DCHECK(this_function_var->raw_name()->IsOneByteEqualTo(".this_function"));
}
@@ -3449,8 +3467,15 @@ class AstNodeFactory final BASE_EMBEDDED {
int literal_index,
bool is_strong,
int pos) {
- return new (zone_) ArrayLiteral(zone_, values, literal_index, is_strong,
- pos);
+ return new (zone_)
+ ArrayLiteral(zone_, values, -1, literal_index, is_strong, pos);
+ }
+
+ ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
+ int first_spread_index, int literal_index,
+ bool is_strong, int pos) {
+ return new (zone_) ArrayLiteral(zone_, values, first_spread_index,
+ literal_index, is_strong, pos);
}
VariableProxy* NewVariableProxy(Variable* var,
diff --git a/deps/v8/src/background-parsing-task.cc b/deps/v8/src/background-parsing-task.cc
index d7df9b4717..cc80e01143 100644
--- a/deps/v8/src/background-parsing-task.cc
+++ b/deps/v8/src/background-parsing-task.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/background-parsing-task.h"
+#include "src/debug/debug.h"
namespace v8 {
namespace internal {
@@ -30,15 +31,8 @@ BackgroundParsingTask::BackgroundParsingTask(
info->set_hash_seed(isolate->heap()->HashSeed());
info->set_global();
info->set_unicode_cache(&source_->unicode_cache);
-
- bool disable_lazy = Compiler::DebuggerWantsEagerCompilation(isolate);
- if (disable_lazy && options == ScriptCompiler::kProduceParserCache) {
- // Producing cached data while parsing eagerly is not supported.
- options = ScriptCompiler::kNoCompileOptions;
- }
-
info->set_compile_options(options);
- info->set_allow_lazy_parsing(!disable_lazy);
+ info->set_allow_lazy_parsing(true);
}
diff --git a/deps/v8/src/background-parsing-task.h b/deps/v8/src/background-parsing-task.h
index 80e1e271d2..e99916169c 100644
--- a/deps/v8/src/background-parsing-task.h
+++ b/deps/v8/src/background-parsing-task.h
@@ -7,9 +7,9 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
+#include "src/base/smart-pointers.h"
#include "src/compiler.h"
#include "src/parser.h"
-#include "src/smart-pointers.h"
namespace v8 {
namespace internal {
@@ -23,17 +23,17 @@ struct StreamedSource {
: source_stream(source_stream), encoding(encoding) {}
// Internal implementation of v8::ScriptCompiler::StreamedSource.
- SmartPointer<ScriptCompiler::ExternalSourceStream> source_stream;
+ base::SmartPointer<ScriptCompiler::ExternalSourceStream> source_stream;
ScriptCompiler::StreamedSource::Encoding encoding;
- SmartPointer<ScriptCompiler::CachedData> cached_data;
+ base::SmartPointer<ScriptCompiler::CachedData> cached_data;
// Data needed for parsing, and data needed to to be passed between thread
// between parsing and compilation. These need to be initialized before the
// compilation starts.
UnicodeCache unicode_cache;
- SmartPointer<Zone> zone;
- SmartPointer<ParseInfo> info;
- SmartPointer<Parser> parser;
+ base::SmartPointer<Zone> zone;
+ base::SmartPointer<ParseInfo> info;
+ base::SmartPointer<Parser> parser;
private:
// Prevent copying. Not implemented.
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 16816348c6..05b8e427c6 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -36,15 +36,10 @@ namespace internal {
"Bad value context for arguments value") \
V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
V(kBailoutWasNotPrepared, "Bailout was not prepared") \
- V(kBinaryStubGenerateFloatingPointCode, \
- "BinaryStub_GenerateFloatingPointCode") \
V(kBothRegistersWereSmisInSelectNonSmi, \
"Both registers were smis in SelectNonSmi") \
- V(kBuiltinFunctionCannotBeOptimized, "Builtin function cannot be optimized") \
V(kCallToAJavaScriptRuntimeFunction, \
"Call to a JavaScript runtime function") \
- V(kCannotTranslatePositionInChangedArea, \
- "Cannot translate position in changed area") \
V(kClassLiteral, "Class literal") \
V(kCodeGenerationFailed, "Code generation failed") \
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
@@ -54,7 +49,6 @@ namespace internal {
V(kCopyBuffersOverlap, "Copy buffers overlap") \
V(kCouldNotGenerateZero, "Could not generate +0.0") \
V(kCouldNotGenerateNegativeZero, "Could not generate -0.0") \
- V(kDebuggerHasBreakPoints, "Debugger has break points") \
V(kDebuggerStatement, "DebuggerStatement") \
V(kDeclarationInCatchContext, "Declaration in catch context") \
V(kDeclarationInWithContext, "Declaration in with context") \
@@ -70,7 +64,6 @@ namespace internal {
V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
"EmitLoadRegister: Unsupported double immediate") \
V(kEval, "eval") \
- V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel") \
V(kExpectedAlignmentMarker, "Expected alignment marker") \
V(kExpectedAllocationSite, "Expected allocation site") \
V(kExpectedFunctionObject, "Expected function object in register") \
@@ -79,30 +72,23 @@ namespace internal {
V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
V(kExpectedNonNullContext, "Expected non-null context") \
V(kExpectedPositiveZero, "Expected +0.0") \
- V(kExpectedAllocationSiteInCell, "Expected AllocationSite in property cell") \
- V(kExpectedFixedArrayInFeedbackVector, \
- "Expected fixed array in feedback vector") \
- V(kExpectedFixedArrayInRegisterA2, "Expected fixed array in register a2") \
- V(kExpectedFixedArrayInRegisterEbx, "Expected fixed array in register ebx") \
- V(kExpectedFixedArrayInRegisterR2, "Expected fixed array in register r2") \
- V(kExpectedFixedArrayInRegisterRbx, "Expected fixed array in register rbx") \
V(kExpectedNewSpaceObject, "Expected new space object") \
- V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
V(kExpectingAlignmentForCopyBytes, "Expecting alignment for CopyBytes") \
V(kExportDeclaration, "Export declaration") \
V(kExternalStringExpectedButNotFound, \
"External string expected, but not found") \
- V(kFailedBailedOutLastTime, "Failed/bailed out last time") \
V(kForInStatementOptimizationIsDisabled, \
"ForInStatement optimization is disabled") \
V(kForInStatementWithNonLocalEachVariable, \
"ForInStatement with non-local each variable") \
V(kForOfStatement, "ForOfStatement") \
V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned") \
+ V(kFunctionBeingDebugged, "Function is being debugged") \
V(kFunctionCallsEval, "Function calls eval") \
- V(kFunctionIsAGenerator, "Function is a generator") \
V(kFunctionWithIllegalRedeclaration, "Function with illegal redeclaration") \
+ V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
+ "The function_data field should be a BytecodeArray on interpreter entry") \
V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
V(kGeneratorFailedToResume, "Generator failed to resume") \
V(kGenerator, "Generator") \
@@ -111,14 +97,10 @@ namespace internal {
V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
V(kHydrogenFilter, "Optimization disabled by filter") \
V(kImportDeclaration, "Import declaration") \
- V(kImproperObjectOnPrototypeChainForStore, \
- "Improper object on prototype chain for store") \
V(kIndexIsNegative, "Index is negative") \
V(kIndexIsTooLarge, "Index is too large") \
V(kInlinedRuntimeFunctionFastOneByteArrayJoin, \
"Inlined runtime function: FastOneByteArrayJoin") \
- V(kInlinedRuntimeFunctionGetFromCache, \
- "Inlined runtime function: GetFromCache") \
V(kInliningBailedOut, "Inlining bailed out") \
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
"Input GPR is expected to have upper32 cleared") \
@@ -149,13 +131,11 @@ namespace internal {
V(kJSObjectWithFastElementsMapHasSlowElements, \
"JSObject with fast elements map has slow elements") \
V(kLetBindingReInitialization, "Let binding re-initialization") \
- V(kLhsHasBeenClobbered, "lhs has been clobbered") \
V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
V(kLiveEdit, "LiveEdit") \
V(kLookupVariableInCountOperation, "Lookup variable in count operation") \
V(kMapBecameDeprecated, "Map became deprecated") \
V(kMapBecameUnstable, "Map became unstable") \
- V(kMapIsNoLongerInEax, "Map is no longer in eax") \
V(kNativeFunctionLiteral, "Native function literal") \
V(kNeedSmiLiteral, "Need a Smi literal here") \
V(kNoCasesLeft, "No cases left") \
@@ -173,8 +153,6 @@ namespace internal {
"Not enough virtual registers (regalloc)") \
V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array") \
V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
- V(kOddballInStringTableIsNotUndefinedOrTheHole, \
- "Oddball in string table is not undefined or the hole") \
V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
@@ -194,8 +172,6 @@ namespace internal {
"Out of virtual registers while trying to allocate temp register") \
V(kParseScopeError, "Parse/scope error") \
V(kPossibleDirectCallToEval, "Possible direct call to eval") \
- V(kPreconditionsWereNotMet, "Preconditions were not met") \
- V(kPropertyAllocationCountFailed, "Property allocation count failed") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kReferenceToAVariableWhichRequiresDynamicLookup, \
"Reference to a variable which requires dynamic lookup") \
@@ -206,8 +182,6 @@ namespace internal {
V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
V(kRestParameter, "Rest parameters") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
- V(kRhsHasBeenClobbered, "Rhs has been clobbered") \
- V(kScopedBlock, "ScopedBlock") \
V(kScriptContext, "Allocation of script context") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
@@ -235,7 +209,6 @@ namespace internal {
"The instruction to patch should be an ori") \
V(kTheSourceAndDestinationAreTheSame, \
"The source and destination are the same") \
- V(kTheStackPointerIsNotAligned, "The stack pointer is not aligned.") \
V(kTheStackWasCorruptedByMacroAssemblerCall, \
"The stack was corrupted by MacroAssembler::Call()") \
V(kTooManyParametersLocals, "Too many parameters/locals") \
@@ -251,9 +224,6 @@ namespace internal {
V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
- V(kUndefinedValueNotLoaded, "Undefined value not loaded") \
- V(kUndoAllocationOfNonAllocatedMemory, \
- "Undo allocation of non allocated memory") \
V(kUnexpectedAllocationTop, "Unexpected allocation top") \
V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
V(kUnexpectedElementsKindInArrayConstructor, \
@@ -264,8 +234,6 @@ namespace internal {
"Unexpected fallthrough from CharFromCode slow case") \
V(kUnexpectedFallThroughFromStringComparison, \
"Unexpected fall-through from string comparison") \
- V(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode, \
- "Unexpected fall-through in BinaryStub_GenerateFloatingPointCode") \
V(kUnexpectedFallthroughToCharCodeAtSlowCase, \
"Unexpected fallthrough to CharCodeAt slow case") \
V(kUnexpectedFallthroughToCharFromCodeSlowCase, \
@@ -296,7 +264,6 @@ namespace internal {
V(kUnexpectedValue, "Unexpected value") \
V(kUnexpectedUnusedPropertiesOfStringWrapper, \
"Unexpected unused properties of string wrapper") \
- V(kUnimplemented, "unimplemented") \
V(kUnsupportedConstCompoundAssignment, \
"Unsupported const compound assignment") \
V(kUnsupportedCountOperationWithConst, \
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index 661bf80e6e..b8ba4eb8d2 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -177,6 +177,11 @@
// Number of bits to represent the page size for paged spaces. The value of 20
// gives 1Mb bytes per page.
+#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
+// Bump up for Power Linux due to larger (64K) page size.
+const int kPageSizeBits = 22;
+#else
const int kPageSizeBits = 20;
+#endif
#endif // V8_BASE_BUILD_CONFIG_H_
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index b6a11cff34..5162182b7a 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -17,6 +17,7 @@
#include <ostream>
#if V8_OS_WIN
+#include "src/base/atomicops.h"
#include "src/base/lazy-instance.h"
#include "src/base/win32-headers.h"
#endif
@@ -111,7 +112,7 @@ TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
struct mach_timespec TimeDelta::ToMachTimespec() const {
struct mach_timespec ts;
DCHECK(delta_ >= 0);
- ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
+ ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
Time::kNanosecondsPerMicrosecond;
return ts;
@@ -434,36 +435,35 @@ class HighResolutionTickClock final : public TickClock {
class RolloverProtectedTickClock final : public TickClock {
public:
- // We initialize rollover_ms_ to 1 to ensure that we will never
- // return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below.
- RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
+ RolloverProtectedTickClock() : rollover_(0) {}
virtual ~RolloverProtectedTickClock() {}
int64_t Now() override {
- LockGuard<Mutex> lock_guard(&mutex_);
// We use timeGetTime() to implement TimeTicks::Now(), which rolls over
// every ~49.7 days. We try to track rollover ourselves, which works if
- // TimeTicks::Now() is called at least every 49 days.
+ // TimeTicks::Now() is called at least every 24 days.
// Note that we do not use GetTickCount() here, since timeGetTime() gives
// more predictable delta values, as described here:
// http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
// timeGetTime() provides 1ms granularity when combined with
// timeBeginPeriod(). If the host application for V8 wants fast timers, it
// can use timeBeginPeriod() to increase the resolution.
- DWORD now = timeGetTime();
- if (now < last_seen_now_) {
- rollover_ms_ += V8_INT64_C(0x100000000); // ~49.7 days.
+ // We use a lock-free version because the sampler thread calls it
+ // while having the rest of the world stopped, that could cause a deadlock.
+ base::Atomic32 rollover = base::Acquire_Load(&rollover_);
+ uint32_t now = static_cast<uint32_t>(timeGetTime());
+ if ((now >> 31) != static_cast<uint32_t>(rollover & 1)) {
+ base::Release_CompareAndSwap(&rollover_, rollover, rollover + 1);
+ ++rollover;
}
- last_seen_now_ = now;
- return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
+ uint64_t ms = (static_cast<uint64_t>(rollover) << 31) | now;
+ return static_cast<int64_t>(ms * Time::kMicrosecondsPerMillisecond);
}
bool IsHighResolution() override { return false; }
private:
- Mutex mutex_;
- DWORD last_seen_now_;
- int64_t rollover_ms_;
+ base::Atomic32 rollover_;
};
diff --git a/deps/v8/src/smart-pointers.h b/deps/v8/src/base/smart-pointers.h
index c4bbd0b45e..6528fca92d 100644
--- a/deps/v8/src/smart-pointers.h
+++ b/deps/v8/src/base/smart-pointers.h
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SMART_POINTERS_H_
-#define V8_SMART_POINTERS_H_
+#ifndef V8_BASE_SMART_POINTERS_H_
+#define V8_BASE_SMART_POINTERS_H_
namespace v8 {
-namespace internal {
+namespace base {
-
-template<typename Deallocator, typename T>
+template <typename Deallocator, typename T>
class SmartPointerBase {
public:
// Default constructor. Constructs an empty scoped pointer.
@@ -20,8 +19,7 @@ class SmartPointerBase {
// Copy constructor removes the pointer from the original to avoid double
// freeing.
- SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs)
- : p_(rhs.p_) {
+ SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs) : p_(rhs.p_) {
const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
}
@@ -32,14 +30,10 @@ class SmartPointerBase {
T* get() const { return p_; }
// You can use [n] to index as if it was a plain pointer.
- T& operator[](size_t i) {
- return p_[i];
- }
+ T& operator[](size_t i) { return p_[i]; }
// You can use [n] to index as if it was a plain pointer.
- const T& operator[](size_t i) const {
- return p_[i];
- }
+ const T& operator[](size_t i) const { return p_[i]; }
// We don't have implicit conversion to a T* since that hinders migration:
// You would not be able to change a method from returning a T* to
@@ -79,7 +73,9 @@ class SmartPointerBase {
// When the destructor of the scoped pointer is executed the plain pointer
// is deleted using DeleteArray. This implies that you must allocate with
// NewArray.
- ~SmartPointerBase() { if (p_) Deallocator::Delete(p_); }
+ ~SmartPointerBase() {
+ if (p_) Deallocator::Delete(p_);
+ }
private:
T* p_;
@@ -88,43 +84,39 @@ class SmartPointerBase {
// A 'scoped array pointer' that calls DeleteArray on its pointer when the
// destructor is called.
-template<typename T>
+template <typename T>
struct ArrayDeallocator {
- static void Delete(T* array) {
- DeleteArray(array);
- }
+ static void Delete(T* array) { delete[] array; }
};
-template<typename T>
-class SmartArrayPointer: public SmartPointerBase<ArrayDeallocator<T>, T> {
+template <typename T>
+class SmartArrayPointer : public SmartPointerBase<ArrayDeallocator<T>, T> {
public:
- SmartArrayPointer() { }
+ SmartArrayPointer() {}
explicit SmartArrayPointer(T* ptr)
- : SmartPointerBase<ArrayDeallocator<T>, T>(ptr) { }
+ : SmartPointerBase<ArrayDeallocator<T>, T>(ptr) {}
SmartArrayPointer(const SmartArrayPointer<T>& rhs)
- : SmartPointerBase<ArrayDeallocator<T>, T>(rhs) { }
+ : SmartPointerBase<ArrayDeallocator<T>, T>(rhs) {}
};
-template<typename T>
+template <typename T>
struct ObjectDeallocator {
- static void Delete(T* object) {
- delete object;
- }
+ static void Delete(T* object) { delete object; }
};
-
-template<typename T>
-class SmartPointer: public SmartPointerBase<ObjectDeallocator<T>, T> {
+template <typename T>
+class SmartPointer : public SmartPointerBase<ObjectDeallocator<T>, T> {
public:
- SmartPointer() { }
+ SmartPointer() {}
explicit SmartPointer(T* ptr)
- : SmartPointerBase<ObjectDeallocator<T>, T>(ptr) { }
+ : SmartPointerBase<ObjectDeallocator<T>, T>(ptr) {}
SmartPointer(const SmartPointer<T>& rhs)
- : SmartPointerBase<ObjectDeallocator<T>, T>(rhs) { }
+ : SmartPointerBase<ObjectDeallocator<T>, T>(rhs) {}
};
-} } // namespace v8::internal
+} // namespace base
+} // namespace v8
-#endif  // V8_SMART_POINTERS_H_
+#endif  // V8_BASE_SMART_POINTERS_H_
diff --git a/deps/v8/src/basic-block-profiler.h b/deps/v8/src/basic-block-profiler.h
index 0f7b15533d..2e7ac9c804 100644
--- a/deps/v8/src/basic-block-profiler.h
+++ b/deps/v8/src/basic-block-profiler.h
@@ -8,8 +8,9 @@
#include <iosfwd>
#include <list>
#include <string>
+#include <vector>
-#include "src/v8.h"
+#include "src/base/macros.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/bignum-dtoa.cc b/deps/v8/src/bignum-dtoa.cc
index ace9e37193..78ee7aa3e5 100644
--- a/deps/v8/src/bignum-dtoa.cc
+++ b/deps/v8/src/bignum-dtoa.cc
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/bignum-dtoa.h"
+
#include <cmath>
#include "src/base/logging.h"
-#include "src/utils.h"
-
-#include "src/bignum-dtoa.h"
-
#include "src/bignum.h"
#include "src/double.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/bignum-dtoa.h b/deps/v8/src/bignum-dtoa.h
index fc160aecd4..d42801bd69 100644
--- a/deps/v8/src/bignum-dtoa.h
+++ b/deps/v8/src/bignum-dtoa.h
@@ -5,6 +5,8 @@
#ifndef V8_BIGNUM_DTOA_H_
#define V8_BIGNUM_DTOA_H_
+#include "src/vector.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/bignum.cc
index e70987a82d..9baf77e7f2 100644
--- a/deps/v8/src/bignum.cc
+++ b/deps/v8/src/bignum.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/bignum.h"
#include "src/utils.h"
diff --git a/deps/v8/src/bignum.h b/deps/v8/src/bignum.h
index 744768f874..7ebdae47bc 100644
--- a/deps/v8/src/bignum.h
+++ b/deps/v8/src/bignum.h
@@ -5,6 +5,8 @@
#ifndef V8_BIGNUM_H_
#define V8_BIGNUM_H_
+#include "src/vector.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 43fc0eb835..790a80b239 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -25,34 +25,11 @@ Bootstrapper::Bootstrapper(Isolate* isolate)
nesting_(0),
extensions_cache_(Script::TYPE_EXTENSION) {}
-
-template <class Source>
-inline FixedArray* GetCache(Heap* heap);
-
-
-template <>
-FixedArray* GetCache<Natives>(Heap* heap) {
- return heap->natives_source_cache();
-}
-
-
-template <>
-FixedArray* GetCache<ExperimentalNatives>(Heap* heap) {
- return heap->experimental_natives_source_cache();
-}
-
-
-template <>
-FixedArray* GetCache<ExtraNatives>(Heap* heap) {
- return heap->extra_natives_source_cache();
-}
-
-
template <class Source>
Handle<String> Bootstrapper::SourceLookup(int index) {
DCHECK(0 <= index && index < Source::GetBuiltinsCount());
Heap* heap = isolate_->heap();
- if (GetCache<Source>(heap)->get(index)->IsUndefined()) {
+ if (Source::GetSourceCache(heap)->get(index)->IsUndefined()) {
// We can use external strings for the natives.
Vector<const char> source = Source::GetScriptSource(index);
NativesExternalStringResource* resource =
@@ -63,9 +40,10 @@ Handle<String> Bootstrapper::SourceLookup(int index) {
.ToHandleChecked();
// Mark this external string with a special map.
source_code->set_map(isolate_->heap()->native_source_string_map());
- GetCache<Source>(heap)->set(index, *source_code);
+ Source::GetSourceCache(heap)->set(index, *source_code);
}
- Handle<Object> cached_source(GetCache<Source>(heap)->get(index), isolate_);
+ Handle<Object> cached_source(Source::GetSourceCache(heap)->get(index),
+ isolate_);
return Handle<String>::cast(cached_source);
}
@@ -74,6 +52,7 @@ template Handle<String> Bootstrapper::SourceLookup<Natives>(int index);
template Handle<String> Bootstrapper::SourceLookup<ExperimentalNatives>(
int index);
template Handle<String> Bootstrapper::SourceLookup<ExtraNatives>(int index);
+template Handle<String> Bootstrapper::SourceLookup<CodeStubNatives>(int index);
void Bootstrapper::Initialize(bool create_heap_objects) {
@@ -139,9 +118,10 @@ void DeleteNativeSources(Object* maybe_array) {
void Bootstrapper::TearDown() {
- DeleteNativeSources(isolate_->heap()->natives_source_cache());
- DeleteNativeSources(isolate_->heap()->experimental_natives_source_cache());
- DeleteNativeSources(isolate_->heap()->extra_natives_source_cache());
+ DeleteNativeSources(Natives::GetSourceCache(isolate_->heap()));
+ DeleteNativeSources(ExperimentalNatives::GetSourceCache(isolate_->heap()));
+ DeleteNativeSources(ExtraNatives::GetSourceCache(isolate_->heap()));
+ DeleteNativeSources(CodeStubNatives::GetSourceCache(isolate_->heap()));
extensions_cache_.Initialize(isolate_, false); // Yes, symmetrical
}
@@ -150,7 +130,7 @@ class Genesis BASE_EMBEDDED {
public:
Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
- v8::ExtensionConfiguration* extensions);
+ v8::ExtensionConfiguration* extensions, ContextType context_type);
~Genesis() { }
Isolate* isolate() const { return isolate_; }
@@ -203,17 +183,13 @@ class Genesis BASE_EMBEDDED {
void HookUpGlobalThisBinding(Handle<FixedArray> outdated_contexts);
// New context initialization. Used for creating a context from scratch.
void InitializeGlobal(Handle<GlobalObject> global_object,
- Handle<JSFunction> empty_function);
+ Handle<JSFunction> empty_function,
+ ContextType context_type);
void InitializeExperimentalGlobal();
- // Installs the contents of the native .js files on the global objects.
- // Used for creating a context from scratch.
- void InstallNativeFunctions();
- void InstallExperimentalNativeFunctions();
// Typed arrays are not serializable and have to initialized afterwards.
void InitializeBuiltinTypedArrays();
#define DECLARE_FEATURE_INITIALIZATION(id, descr) \
- void InstallNativeFunctions_##id(); \
void InitializeGlobal_##id();
HARMONY_INPROGRESS(DECLARE_FEATURE_INITIALIZATION)
@@ -224,18 +200,15 @@ class Genesis BASE_EMBEDDED {
Handle<JSFunction> InstallInternalArray(Handle<JSObject> target,
const char* name,
ElementsKind elements_kind);
- bool InstallNatives();
+ bool InstallNatives(ContextType context_type);
- void InstallTypedArray(
- const char* name,
- ElementsKind elements_kind,
- Handle<JSFunction>* fun,
- Handle<Map>* external_map);
+ void InstallTypedArray(const char* name, ElementsKind elements_kind,
+ Handle<JSFunction>* fun);
bool InstallExperimentalNatives();
bool InstallExtraNatives();
+ bool InstallDebuggerNatives();
void InstallBuiltinFunctionIds();
void InstallExperimentalBuiltinFunctionIds();
- void InstallJSFunctionResultCaches();
void InitializeNormalizedMapCaches();
enum ExtensionTraversalState {
@@ -269,7 +242,6 @@ class Genesis BASE_EMBEDDED {
v8::RegisteredExtension* current,
ExtensionStates* extension_states);
static bool InstallSpecialObjects(Handle<Context> native_context);
- bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
bool ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template);
bool ConfigureGlobalObjects(
@@ -312,13 +284,6 @@ class Genesis BASE_EMBEDDED {
FunctionMode function_mode);
void SetStrongFunctionInstanceDescriptor(Handle<Map> map);
- static bool CompileBuiltin(Isolate* isolate, int index);
- static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
- static bool CompileExtraBuiltin(Isolate* isolate, int index);
- static bool CompileNative(Isolate* isolate, Vector<const char> name,
- Handle<String> source, int argc,
- Handle<Object> argv[]);
-
static bool CallUtilsFunction(Isolate* isolate, const char* name);
static bool CompileExtension(Isolate* isolate, v8::Extension* extension);
@@ -350,18 +315,39 @@ void Bootstrapper::Iterate(ObjectVisitor* v) {
Handle<Context> Bootstrapper::CreateEnvironment(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
- v8::ExtensionConfiguration* extensions) {
+ v8::ExtensionConfiguration* extensions, ContextType context_type) {
HandleScope scope(isolate_);
- Genesis genesis(
- isolate_, maybe_global_proxy, global_proxy_template, extensions);
+ Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template,
+ extensions, context_type);
Handle<Context> env = genesis.result();
- if (env.is_null() || !InstallExtensions(env, extensions)) {
+ if (env.is_null() ||
+ (context_type != THIN_CONTEXT && !InstallExtensions(env, extensions))) {
return Handle<Context>();
}
return scope.CloseAndEscape(env);
}
+bool Bootstrapper::CreateCodeStubContext(Isolate* isolate) {
+ HandleScope scope(isolate);
+ SaveContext save_context(isolate);
+ BootstrapperActive active(this);
+
+ v8::ExtensionConfiguration no_extensions;
+ Handle<Context> native_context = CreateEnvironment(
+ MaybeHandle<JSGlobalProxy>(), v8::Local<v8::ObjectTemplate>(),
+ &no_extensions, THIN_CONTEXT);
+ isolate->heap()->set_code_stub_context(*native_context);
+ isolate->set_context(*native_context);
+ Handle<JSObject> code_stub_exports =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ JSObject::NormalizeProperties(code_stub_exports, CLEAR_INOBJECT_PROPERTIES, 2,
+ "container to export to extra natives");
+ isolate->heap()->set_code_stub_exports_object(*code_stub_exports);
+ return InstallCodeStubNatives(isolate);
+}
+
+
static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
// object.__proto__ = proto;
Handle<Map> old_map = Handle<Map>(object->map());
@@ -514,7 +500,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
int instance_size = JSObject::kHeaderSize + kPointerSize * unused;
Handle<Map> object_function_map =
factory->NewMap(JS_OBJECT_TYPE, instance_size);
- object_function_map->set_inobject_properties(unused);
+ object_function_map->SetInObjectProperties(unused);
JSFunction::SetInitialMap(object_fun, object_function_map,
isolate->factory()->null_value());
object_function_map->set_unused_property_fields(unused);
@@ -595,27 +581,35 @@ void Genesis::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
PropertyAttributes roc_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- // Add length.
if (function_mode == BOUND_FUNCTION) {
- Handle<String> length_string = isolate()->factory()->length_string();
- DataDescriptor d(length_string, 0, roc_attribs, Representation::Tagged());
- map->AppendDescriptor(&d);
+ { // Add length.
+ Handle<String> length_string = isolate()->factory()->length_string();
+ DataDescriptor d(length_string, 0, roc_attribs, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+ { // Add name.
+ Handle<String> name_string = isolate()->factory()->name_string();
+ DataDescriptor d(name_string, 1, roc_attribs, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
} else {
DCHECK(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
function_mode == FUNCTION_WITH_READONLY_PROTOTYPE ||
function_mode == FUNCTION_WITHOUT_PROTOTYPE);
- Handle<AccessorInfo> length =
- Accessors::FunctionLengthInfo(isolate(), roc_attribs);
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
- length, roc_attribs);
- map->AppendDescriptor(&d);
- }
- Handle<AccessorInfo> name =
- Accessors::FunctionNameInfo(isolate(), roc_attribs);
- { // Add name.
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
- roc_attribs);
- map->AppendDescriptor(&d);
+ { // Add length.
+ Handle<AccessorInfo> length =
+ Accessors::FunctionLengthInfo(isolate(), roc_attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
+ length, roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+ { // Add name.
+ Handle<AccessorInfo> name =
+ Accessors::FunctionNameInfo(isolate(), roc_attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
+ roc_attribs);
+ map->AppendDescriptor(&d);
+ }
}
if (IsFunctionModeWithPrototype(function_mode)) {
// Add prototype.
@@ -992,7 +986,8 @@ void Genesis::HookUpGlobalObject(Handle<GlobalObject> global_object,
// This is only called if we are not using snapshots. The equivalent
// work in the snapshot case is done in HookUpGlobalObject.
void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
- Handle<JSFunction> empty_function) {
+ Handle<JSFunction> empty_function,
+ ContextType context_type) {
// --- N a t i v e C o n t e x t ---
// Use the empty function as closure (no scope info).
native_context()->set_closure(*empty_function);
@@ -1124,12 +1119,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
{ // --- D a t e ---
// Builtin functions for Date.prototype.
- Handle<JSFunction> date_fun =
- InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal);
-
- native_context()->set_date_function(*date_fun);
+ InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
}
@@ -1144,7 +1135,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
DCHECK(regexp_fun->has_initial_map());
Handle<Map> initial_map(regexp_fun->initial_map());
- DCHECK_EQ(0, initial_map->inobject_properties());
+ DCHECK_EQ(0, initial_map->GetInObjectProperties());
PropertyAttributes final =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -1189,8 +1180,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
}
static const int num_fields = JSRegExp::kInObjectFieldCount;
- initial_map->set_inobject_properties(num_fields);
- initial_map->set_pre_allocated_property_fields(num_fields);
+ initial_map->SetInObjectProperties(num_fields);
initial_map->set_unused_property_fields(0);
initial_map->set_instance_size(initial_map->instance_size() +
num_fields * kPointerSize);
@@ -1217,6 +1207,12 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
JSRegExp::Flags(0), 0);
}
+ // Initialize the embedder data slot.
+ Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
+ native_context()->set_embedder_data(*embedder_data);
+
+ if (context_type == THIN_CONTEXT) return;
+
{ // -- J S O N
Handle<String> name = factory->InternalizeUtf8String("JSON");
Handle<JSFunction> cons = factory->NewFunction(name);
@@ -1226,7 +1222,18 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
DCHECK(json_object->IsJSObject());
JSObject::AddProperty(global, name, json_object, DONT_ENUM);
- native_context()->set_json_object(*json_object);
+ }
+
+ { // -- M a t h
+ Handle<String> name = factory->InternalizeUtf8String("Math");
+ Handle<JSFunction> cons = factory->NewFunction(name);
+ JSFunction::SetInstancePrototype(
+ cons,
+ Handle<Object>(native_context()->initial_object_prototype(), isolate));
+ cons->SetInstanceClassName(*name);
+ Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
+ DCHECK(json_object->IsJSObject());
+ JSObject::AddProperty(global, name, json_object, DONT_ENUM);
}
{ // -- A r r a y B u f f e r
@@ -1240,17 +1247,12 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
}
{ // -- T y p e d A r r a y s
-#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
- { \
- Handle<JSFunction> fun; \
- Handle<Map> external_map; \
- InstallTypedArray(#Type "Array", \
- TYPE##_ELEMENTS, \
- &fun, \
- &external_map); \
- native_context()->set_##type##_array_fun(*fun); \
- native_context()->set_##type##_array_external_map(*external_map); \
- }
+#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ { \
+ Handle<JSFunction> fun; \
+ InstallTypedArray(#Type "Array", TYPE##_ELEMENTS, &fun); \
+ native_context()->set_##type##_array_fun(*fun); \
+ }
TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
#undef INSTALL_TYPED_ARRAY
@@ -1285,7 +1287,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
DCHECK_EQ(JSGeneratorObject::kResultSize,
iterator_result_map->instance_size());
DCHECK_EQ(JSGeneratorObject::kResultPropertyCount,
- iterator_result_map->inobject_properties());
+ iterator_result_map->GetInObjectProperties());
Map::EnsureDescriptorSlack(iterator_result_map,
JSGeneratorObject::kResultPropertyCount);
@@ -1300,8 +1302,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
iterator_result_map->AppendDescriptor(&done_descr);
iterator_result_map->set_unused_property_fields(0);
- iterator_result_map->set_pre_allocated_property_fields(
- JSGeneratorObject::kResultPropertyCount);
DCHECK_EQ(JSGeneratorObject::kResultSize,
iterator_result_map->instance_size());
native_context()->set_iterator_result_map(*iterator_result_map);
@@ -1342,16 +1342,15 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
// @@iterator method is added later.
map->set_function_with_prototype(true);
- map->set_pre_allocated_property_fields(2);
- map->set_inobject_properties(2);
+ map->SetInObjectProperties(2);
native_context()->set_sloppy_arguments_map(*map);
DCHECK(!function->has_initial_map());
JSFunction::SetInitialMap(function, map,
isolate->initial_object_prototype());
- DCHECK(map->inobject_properties() > Heap::kArgumentsCalleeIndex);
- DCHECK(map->inobject_properties() > Heap::kArgumentsLengthIndex);
+ DCHECK(map->GetInObjectProperties() > Heap::kArgumentsCalleeIndex);
+ DCHECK(map->GetInObjectProperties() > Heap::kArgumentsLengthIndex);
DCHECK(!map->is_dictionary_map());
DCHECK(IsFastObjectElementsKind(map->elements_kind()));
}
@@ -1360,12 +1359,12 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Handle<Map> map = isolate->sloppy_arguments_map();
map = Map::Copy(map, "FastAliasedArguments");
map->set_elements_kind(FAST_SLOPPY_ARGUMENTS_ELEMENTS);
- DCHECK_EQ(2, map->pre_allocated_property_fields());
+ DCHECK_EQ(2, map->GetInObjectProperties());
native_context()->set_fast_aliased_arguments_map(*map);
map = Map::Copy(map, "SlowAliasedArguments");
map->set_elements_kind(SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
- DCHECK_EQ(2, map->pre_allocated_property_fields());
+ DCHECK_EQ(2, map->GetInObjectProperties());
native_context()->set_slow_aliased_arguments_map(*map);
}
@@ -1412,8 +1411,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
DCHECK_EQ(native_context()->object_function()->prototype(),
*isolate->initial_object_prototype());
Map::SetPrototype(map, isolate->initial_object_prototype());
- map->set_pre_allocated_property_fields(1);
- map->set_inobject_properties(1);
+ map->SetInObjectProperties(1);
// Copy constructor from the sloppy arguments boilerplate.
map->SetConstructor(
@@ -1421,7 +1419,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
native_context()->set_strict_arguments_map(*map);
- DCHECK(map->inobject_properties() > Heap::kArgumentsLengthIndex);
+ DCHECK(map->GetInObjectProperties() > Heap::kArgumentsLengthIndex);
DCHECK(!map->is_dictionary_map());
DCHECK(IsFastObjectElementsKind(map->elements_kind()));
}
@@ -1462,18 +1460,11 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
native_context()->set_call_as_constructor_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
}
-
- // Initialize the embedder data slot.
- Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
- native_context()->set_embedder_data(*embedder_data);
}
-void Genesis::InstallTypedArray(
- const char* name,
- ElementsKind elements_kind,
- Handle<JSFunction>* fun,
- Handle<Map>* external_map) {
+void Genesis::InstallTypedArray(const char* name, ElementsKind elements_kind,
+ Handle<JSFunction>* fun) {
Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
Handle<JSFunction> result = InstallFunction(
global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize,
@@ -1486,9 +1477,6 @@ void Genesis::InstallTypedArray(
JSFunction::SetInitialMap(result, initial_map,
handle(initial_map->prototype(), isolate()));
*fun = result;
-
- ElementsKind external_kind = GetNextTransitionElementsKind(elements_kind);
- *external_map = Map::AsElementsKind(initial_map, external_kind);
}
@@ -1502,18 +1490,21 @@ void Genesis::InitializeExperimentalGlobal() {
}
-bool Genesis::CompileBuiltin(Isolate* isolate, int index) {
+bool Bootstrapper::CompileBuiltin(Isolate* isolate, int index) {
Vector<const char> name = Natives::GetScriptName(index);
Handle<String> source_code =
isolate->bootstrapper()->SourceLookup<Natives>(index);
Handle<Object> global = isolate->global_object();
Handle<Object> utils = isolate->natives_utils_object();
Handle<Object> args[] = {global, utils};
- return CompileNative(isolate, name, source_code, arraysize(args), args);
+
+ return Bootstrapper::CompileNative(
+ isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
+ source_code, arraysize(args), args);
}
-bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
+bool Bootstrapper::CompileExperimentalBuiltin(Isolate* isolate, int index) {
HandleScope scope(isolate);
Vector<const char> name = ExperimentalNatives::GetScriptName(index);
Handle<String> source_code =
@@ -1521,25 +1512,44 @@ bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
Handle<Object> global = isolate->global_object();
Handle<Object> utils = isolate->natives_utils_object();
Handle<Object> args[] = {global, utils};
- return CompileNative(isolate, name, source_code, arraysize(args), args);
+ return Bootstrapper::CompileNative(
+ isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
+ source_code, arraysize(args), args);
}
-bool Genesis::CompileExtraBuiltin(Isolate* isolate, int index) {
+bool Bootstrapper::CompileExtraBuiltin(Isolate* isolate, int index) {
HandleScope scope(isolate);
Vector<const char> name = ExtraNatives::GetScriptName(index);
Handle<String> source_code =
isolate->bootstrapper()->SourceLookup<ExtraNatives>(index);
Handle<Object> global = isolate->global_object();
- Handle<Object> exports = isolate->extras_exports_object();
+ Handle<Object> binding = isolate->extras_binding_object();
+ Handle<Object> args[] = {global, binding};
+ return Bootstrapper::CompileNative(
+ isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
+ source_code, arraysize(args), args);
+}
+
+
+bool Bootstrapper::CompileCodeStubBuiltin(Isolate* isolate, int index) {
+ HandleScope scope(isolate);
+ Vector<const char> name = CodeStubNatives::GetScriptName(index);
+ Handle<String> source_code =
+ isolate->bootstrapper()->SourceLookup<CodeStubNatives>(index);
+ Handle<JSObject> global(isolate->global_object());
+ Handle<JSObject> exports(isolate->heap()->code_stub_exports_object());
Handle<Object> args[] = {global, exports};
- return CompileNative(isolate, name, source_code, arraysize(args), args);
+ bool result =
+ CompileNative(isolate, name, global, source_code, arraysize(args), args);
+ return result;
}
-bool Genesis::CompileNative(Isolate* isolate, Vector<const char> name,
- Handle<String> source, int argc,
- Handle<Object> argv[]) {
+bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
+ Handle<JSObject> receiver,
+ Handle<String> source, int argc,
+ Handle<Object> argv[]) {
SuppressDebug compiling_natives(isolate->debug());
// During genesis, the boilerplate for stack overflow won't work until the
// environment has been at least partially initialized. Add a stack check
@@ -1563,7 +1573,6 @@ bool Genesis::CompileNative(Isolate* isolate, Vector<const char> name,
DCHECK(context->IsNativeContext());
Handle<Context> runtime_context(context->runtime_context());
- Handle<JSBuiltinsObject> receiver(context->builtins());
Handle<JSFunction> fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(function_info,
runtime_context);
@@ -1665,84 +1674,6 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
}
-#define INSTALL_NATIVE(Type, name, var) \
- Handle<String> var##_name = \
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR(name)); \
- Handle<Object> var##_native = \
- Object::GetProperty(handle(native_context()->builtins()), var##_name) \
- .ToHandleChecked(); \
- native_context()->set_##var(Type::cast(*var##_native));
-
-
-void Genesis::InstallNativeFunctions() {
- HandleScope scope(isolate());
- INSTALL_NATIVE(JSFunction, "$createDate", create_date_fun);
-
- INSTALL_NATIVE(JSFunction, "$toNumber", to_number_fun);
- INSTALL_NATIVE(JSFunction, "$toString", to_string_fun);
- INSTALL_NATIVE(JSFunction, "$toDetailString", to_detail_string_fun);
- INSTALL_NATIVE(JSFunction, "$toObject", to_object_fun);
- INSTALL_NATIVE(JSFunction, "$toInteger", to_integer_fun);
- INSTALL_NATIVE(JSFunction, "$toUint32", to_uint32_fun);
- INSTALL_NATIVE(JSFunction, "$toInt32", to_int32_fun);
- INSTALL_NATIVE(JSFunction, "$toLength", to_length_fun);
-
- INSTALL_NATIVE(JSFunction, "$globalEval", global_eval_fun);
- INSTALL_NATIVE(JSFunction, "$getStackTraceLine", get_stack_trace_line_fun);
- INSTALL_NATIVE(JSFunction, "$toCompletePropertyDescriptor",
- to_complete_property_descriptor);
-
- INSTALL_NATIVE(Symbol, "$promiseStatus", promise_status);
- INSTALL_NATIVE(Symbol, "$promiseValue", promise_value);
- INSTALL_NATIVE(JSFunction, "$promiseCreate", promise_create);
- INSTALL_NATIVE(JSFunction, "$promiseResolve", promise_resolve);
- INSTALL_NATIVE(JSFunction, "$promiseReject", promise_reject);
- INSTALL_NATIVE(JSFunction, "$promiseChain", promise_chain);
- INSTALL_NATIVE(JSFunction, "$promiseCatch", promise_catch);
- INSTALL_NATIVE(JSFunction, "$promiseThen", promise_then);
-
- INSTALL_NATIVE(JSFunction, "$observeNotifyChange", observers_notify_change);
- INSTALL_NATIVE(JSFunction, "$observeEnqueueSpliceRecord",
- observers_enqueue_splice);
- INSTALL_NATIVE(JSFunction, "$observeBeginPerformSplice",
- observers_begin_perform_splice);
- INSTALL_NATIVE(JSFunction, "$observeEndPerformSplice",
- observers_end_perform_splice);
- INSTALL_NATIVE(JSFunction, "$observeNativeObjectObserve",
- native_object_observe);
- INSTALL_NATIVE(JSFunction, "$observeNativeObjectGetNotifier",
- native_object_get_notifier);
- INSTALL_NATIVE(JSFunction, "$observeNativeObjectNotifierPerformChange",
- native_object_notifier_perform_change);
- INSTALL_NATIVE(JSFunction, "$arrayValues", array_values_iterator);
- INSTALL_NATIVE(JSFunction, "$mapGet", map_get);
- INSTALL_NATIVE(JSFunction, "$mapSet", map_set);
- INSTALL_NATIVE(JSFunction, "$mapHas", map_has);
- INSTALL_NATIVE(JSFunction, "$mapDelete", map_delete);
- INSTALL_NATIVE(JSFunction, "$setAdd", set_add);
- INSTALL_NATIVE(JSFunction, "$setHas", set_has);
- INSTALL_NATIVE(JSFunction, "$setDelete", set_delete);
- INSTALL_NATIVE(JSFunction, "$mapFromArray", map_from_array);
- INSTALL_NATIVE(JSFunction, "$setFromArray", set_from_array);
-}
-
-
-void Genesis::InstallExperimentalNativeFunctions() {
- if (FLAG_harmony_proxies) {
- INSTALL_NATIVE(JSFunction, "$proxyDerivedHasTrap", derived_has_trap);
- INSTALL_NATIVE(JSFunction, "$proxyDerivedGetTrap", derived_get_trap);
- INSTALL_NATIVE(JSFunction, "$proxyDerivedSetTrap", derived_set_trap);
- INSTALL_NATIVE(JSFunction, "$proxyEnumerate", proxy_enumerate);
- }
-
-#define INSTALL_NATIVE_FUNCTIONS_FOR(id, descr) InstallNativeFunctions_##id();
- HARMONY_INPROGRESS(INSTALL_NATIVE_FUNCTIONS_FOR)
- HARMONY_STAGED(INSTALL_NATIVE_FUNCTIONS_FOR)
- HARMONY_SHIPPING(INSTALL_NATIVE_FUNCTIONS_FOR)
-#undef INSTALL_NATIVE_FUNCTIONS_FOR
-}
-
-
template <typename Data>
Data* SetBuiltinTypedArray(Isolate* isolate, Handle<JSBuiltinsObject> builtins,
ExternalArrayType type, Data* data,
@@ -1762,7 +1693,8 @@ Data* SetBuiltinTypedArray(Isolate* isolate, Handle<JSBuiltinsObject> builtins,
// Reset property cell type before (re)initializing.
JSBuiltinsObject::InvalidatePropertyCell(builtins, name_string);
JSObject::SetOwnPropertyIgnoreAttributes(builtins, name_string, typed_array,
- DONT_DELETE).Assert();
+ FROZEN)
+ .Assert();
return data;
}
@@ -1798,8 +1730,92 @@ void Genesis::InitializeBuiltinTypedArrays() {
}
-#define EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(id) \
- void Genesis::InstallNativeFunctions_##id() {}
+#define INSTALL_NATIVE(Type, name, var) \
+ Handle<Object> var##_native = \
+ Object::GetProperty(isolate, container, name, STRICT).ToHandleChecked(); \
+ DCHECK(var##_native->Is##Type()); \
+ native_context->set_##var(Type::cast(*var##_native));
+
+
+void Bootstrapper::ImportNatives(Isolate* isolate, Handle<JSObject> container) {
+ HandleScope scope(isolate);
+ Handle<Context> native_context = isolate->native_context();
+ INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
+ INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
+ INSTALL_NATIVE(JSFunction, "ToString", to_string_fun);
+ INSTALL_NATIVE(JSFunction, "ToDetailString", to_detail_string_fun);
+ INSTALL_NATIVE(JSFunction, "NoSideEffectToString",
+ no_side_effect_to_string_fun);
+ INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
+ INSTALL_NATIVE(JSFunction, "ToLength", to_length_fun);
+
+ INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
+ INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
+ INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
+ to_complete_property_descriptor);
+ INSTALL_NATIVE(JSFunction, "ObjectDefineOwnProperty",
+ object_define_own_property);
+ INSTALL_NATIVE(JSFunction, "ObjectGetOwnPropertyDescriptor",
+ object_get_own_property_descriptor);
+ INSTALL_NATIVE(JSFunction, "MessageGetLineNumber", message_get_line_number);
+ INSTALL_NATIVE(JSFunction, "MessageGetColumnNumber",
+ message_get_column_number);
+ INSTALL_NATIVE(JSFunction, "MessageGetSourceLine", message_get_source_line);
+ INSTALL_NATIVE(JSObject, "StackOverflowBoilerplate",
+ stack_overflow_boilerplate);
+ INSTALL_NATIVE(JSFunction, "JsonSerializeAdapter", json_serialize_adapter);
+
+ INSTALL_NATIVE(JSFunction, "Error", error_function);
+ INSTALL_NATIVE(JSFunction, "EvalError", eval_error_function);
+ INSTALL_NATIVE(JSFunction, "RangeError", range_error_function);
+ INSTALL_NATIVE(JSFunction, "ReferenceError", reference_error_function);
+ INSTALL_NATIVE(JSFunction, "SyntaxError", syntax_error_function);
+ INSTALL_NATIVE(JSFunction, "TypeError", type_error_function);
+ INSTALL_NATIVE(JSFunction, "URIError", uri_error_function);
+ INSTALL_NATIVE(JSFunction, "MakeError", make_error_function);
+
+ INSTALL_NATIVE(Symbol, "promiseStatus", promise_status);
+ INSTALL_NATIVE(Symbol, "promiseValue", promise_value);
+ INSTALL_NATIVE(JSFunction, "PromiseCreate", promise_create);
+ INSTALL_NATIVE(JSFunction, "PromiseResolve", promise_resolve);
+ INSTALL_NATIVE(JSFunction, "PromiseReject", promise_reject);
+ INSTALL_NATIVE(JSFunction, "PromiseChain", promise_chain);
+ INSTALL_NATIVE(JSFunction, "PromiseCatch", promise_catch);
+ INSTALL_NATIVE(JSFunction, "PromiseThen", promise_then);
+ INSTALL_NATIVE(JSFunction, "PromiseHasUserDefinedRejectHandler",
+ promise_has_user_defined_reject_handler);
+
+ INSTALL_NATIVE(JSFunction, "ObserveNotifyChange", observers_notify_change);
+ INSTALL_NATIVE(JSFunction, "ObserveEnqueueSpliceRecord",
+ observers_enqueue_splice);
+ INSTALL_NATIVE(JSFunction, "ObserveBeginPerformSplice",
+ observers_begin_perform_splice);
+ INSTALL_NATIVE(JSFunction, "ObserveEndPerformSplice",
+ observers_end_perform_splice);
+ INSTALL_NATIVE(JSFunction, "ObserveNativeObjectObserve",
+ native_object_observe);
+ INSTALL_NATIVE(JSFunction, "ObserveNativeObjectGetNotifier",
+ native_object_get_notifier);
+ INSTALL_NATIVE(JSFunction, "ObserveNativeObjectNotifierPerformChange",
+ native_object_notifier_perform_change);
+
+ INSTALL_NATIVE(JSFunction, "ArrayValues", array_values_iterator);
+ INSTALL_NATIVE(JSFunction, "MapGet", map_get);
+ INSTALL_NATIVE(JSFunction, "MapSet", map_set);
+ INSTALL_NATIVE(JSFunction, "MapHas", map_has);
+ INSTALL_NATIVE(JSFunction, "MapDelete", map_delete);
+ INSTALL_NATIVE(JSFunction, "SetAdd", set_add);
+ INSTALL_NATIVE(JSFunction, "SetHas", set_has);
+ INSTALL_NATIVE(JSFunction, "SetDelete", set_delete);
+ INSTALL_NATIVE(JSFunction, "MapFromArray", map_from_array);
+ INSTALL_NATIVE(JSFunction, "SetFromArray", set_from_array);
+}
+
+
+#define EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(id) \
+ static void InstallExperimentalNatives_##id(Isolate* isolate, \
+ Handle<Context> native_context, \
+ Handle<JSObject> container) {}
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_modules)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_array_includes)
@@ -1807,33 +1823,69 @@ EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_regexps)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrow_functions)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_tostring)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sloppy)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_unicode)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sloppy_function)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sloppy_let)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_unicode_regexps)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_computed_property_names)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_rest_parameters)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_default_parameters)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_reflect)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_spreadcalls)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_destructuring)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_object)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_object_observe)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_spread_arrays)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sharedarraybuffer)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_atomics)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_new_target)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_concat_spreadable)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_simd)
-void Genesis::InstallNativeFunctions_harmony_proxies() {
+static void InstallExperimentalNatives_harmony_proxies(
+ Isolate* isolate, Handle<Context> native_context,
+ Handle<JSObject> container) {
if (FLAG_harmony_proxies) {
- INSTALL_NATIVE(JSFunction, "$proxyDerivedHasTrap", derived_has_trap);
- INSTALL_NATIVE(JSFunction, "$proxyDerivedGetTrap", derived_get_trap);
- INSTALL_NATIVE(JSFunction, "$proxyDerivedSetTrap", derived_set_trap);
- INSTALL_NATIVE(JSFunction, "$proxyEnumerate", proxy_enumerate);
+ INSTALL_NATIVE(JSFunction, "ProxyDerivedGetTrap", derived_get_trap);
+ INSTALL_NATIVE(JSFunction, "ProxyDerivedHasTrap", derived_has_trap);
+ INSTALL_NATIVE(JSFunction, "ProxyDerivedSetTrap", derived_set_trap);
+ INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
}
}
+void Bootstrapper::ImportExperimentalNatives(Isolate* isolate,
+ Handle<JSObject> container) {
+ HandleScope scope(isolate);
+ Handle<Context> native_context = isolate->native_context();
+#define INSTALL_NATIVE_FUNCTIONS_FOR(id, descr) \
+ InstallExperimentalNatives_##id(isolate, native_context, container);
+
+ HARMONY_INPROGRESS(INSTALL_NATIVE_FUNCTIONS_FOR)
+ HARMONY_STAGED(INSTALL_NATIVE_FUNCTIONS_FOR)
+ HARMONY_SHIPPING(INSTALL_NATIVE_FUNCTIONS_FOR)
+#undef INSTALL_NATIVE_FUNCTIONS_FOR
+}
+
#undef INSTALL_NATIVE
+
+bool Bootstrapper::InstallJSBuiltins(Isolate* isolate,
+ Handle<JSObject> container) {
+ HandleScope scope(isolate);
+ Handle<JSBuiltinsObject> builtins = isolate->js_builtins_object();
+ for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
+ Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
+ Handle<Object> function_object =
+ Object::GetProperty(isolate, container, Builtins::GetName(id))
+ .ToHandleChecked();
+ DCHECK(function_object->IsJSFunction());
+ Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+ builtins->set_javascript_builtin(id, *function);
+ }
+ return true;
+}
+
+
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
@@ -1842,12 +1894,14 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_array_includes)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrow_functions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_proxies)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_computed_property_names)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_function)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_let)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_rest_parameters)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_default_parameters)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_spreadcalls)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_observe)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_spread_arrays)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_atomics)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_new_target)
@@ -1877,22 +1931,27 @@ void Genesis::InitializeGlobal_harmony_unicode_regexps() {
void Genesis::InitializeGlobal_harmony_reflect() {
Handle<JSObject> builtins(native_context()->builtins());
- // Install references to functions of the Reflect object
- if (FLAG_harmony_reflect || FLAG_harmony_spreadcalls) {
- Handle<JSFunction> apply = InstallFunction(
- builtins, "$reflectApply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kReflectApply);
- Handle<JSFunction> construct = InstallFunction(
- builtins, "$reflectConstruct", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kReflectConstruct);
- apply->shared()->set_internal_formal_parameter_count(3);
- apply->shared()->set_length(3);
- construct->shared()->set_internal_formal_parameter_count(3);
- construct->shared()->set_length(2);
- }
+ Handle<JSFunction> apply = InstallFunction(
+ builtins, "$reflectApply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ MaybeHandle<JSObject>(), Builtins::kReflectApply);
+ apply->shared()->set_internal_formal_parameter_count(3);
+ apply->shared()->set_length(3);
+ Handle<TypeFeedbackVector> apply_feedback_vector =
+ TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate());
+ apply->shared()->set_feedback_vector(*apply_feedback_vector);
+
+ Handle<JSFunction> construct = InstallFunction(
+ builtins, "$reflectConstruct", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ MaybeHandle<JSObject>(), Builtins::kReflectConstruct);
+ construct->shared()->set_internal_formal_parameter_count(3);
+ construct->shared()->set_length(2);
+ Handle<TypeFeedbackVector> construct_feedback_vector =
+ TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate());
+ construct->shared()->set_feedback_vector(*construct_feedback_vector);
if (!FLAG_harmony_reflect) return;
+
Handle<JSGlobalObject> global(JSGlobalObject::cast(
native_context()->global_object()));
Handle<String> reflect_string =
@@ -1928,6 +1987,37 @@ void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
}
+void Genesis::InitializeGlobal_harmony_simd() {
+ if (!FLAG_harmony_simd) return;
+
+ Handle<JSGlobalObject> global(
+ JSGlobalObject::cast(native_context()->global_object()));
+ Isolate* isolate = global->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ Handle<String> name = factory->InternalizeUtf8String("SIMD");
+ Handle<JSFunction> cons = factory->NewFunction(name);
+ JSFunction::SetInstancePrototype(
+ cons,
+ Handle<Object>(native_context()->initial_object_prototype(), isolate));
+ cons->SetInstanceClassName(*name);
+ Handle<JSObject> simd_object = factory->NewJSObject(cons, TENURED);
+ DCHECK(simd_object->IsJSObject());
+ JSObject::AddProperty(global, name, simd_object, DONT_ENUM);
+
+// Install SIMD type functions. Set the instance class names since
+// InstallFunction only does this when we install on the GlobalObject.
+#define SIMD128_INSTALL_FUNCTION(TYPE, Type, type, lane_count, lane_type) \
+ Handle<JSFunction> type##_function = InstallFunction( \
+ simd_object, #Type, JS_VALUE_TYPE, JSValue::kSize, \
+ isolate->initial_object_prototype(), Builtins::kIllegal); \
+ native_context()->set_##type##_function(*type##_function); \
+ type##_function->SetInstanceClassName(*factory->Type##_string());
+ SIMD128_TYPES(SIMD128_INSTALL_FUNCTION)
+#undef SIMD128_INSTALL_FUNCTION
+}
+
+
Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
const char* name,
ElementsKind elements_kind) {
@@ -1971,7 +2061,7 @@ Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
}
-bool Genesis::InstallNatives() {
+bool Genesis::InstallNatives(ContextType context_type) {
HandleScope scope(isolate());
// Create a function for the builtins object. Allocate space for the
@@ -2025,11 +2115,15 @@ bool Genesis::InstallNatives() {
"utils container for native scripts");
native_context()->set_natives_utils_object(*utils);
- Handle<JSObject> extras_exports =
- factory()->NewJSObject(isolate()->object_function());
- JSObject::NormalizeProperties(extras_exports, CLEAR_INOBJECT_PROPERTIES, 2,
- "container to export to extra natives");
- native_context()->set_extras_exports_object(*extras_exports);
+ int builtin_index = Natives::GetDebuggerCount();
+ // Only run prologue.js and runtime.js at this point.
+ DCHECK_EQ(builtin_index, Natives::GetIndex("prologue"));
+ if (!Bootstrapper::CompileBuiltin(isolate(), builtin_index++)) return false;
+ DCHECK_EQ(builtin_index, Natives::GetIndex("runtime"));
+ if (!Bootstrapper::CompileBuiltin(isolate(), builtin_index++)) return false;
+
+ // A thin context is ready at this point.
+ if (context_type == THIN_CONTEXT) return true;
if (FLAG_expose_natives_as != NULL) {
Handle<String> utils_key = factory()->NewStringFromAsciiChecked("utils");
@@ -2316,20 +2410,16 @@ bool Genesis::InstallNatives() {
#undef INSTALL_PUBLIC_SYMBOL
}
- int i = Natives::GetDebuggerCount();
- if (!CompileBuiltin(isolate(), i)) return false;
- if (!InstallJSBuiltins(builtins)) return false;
-
- for (++i; i < Natives::GetBuiltinsCount(); ++i) {
- if (!CompileBuiltin(isolate(), i)) return false;
+ // Run the rest of the native scripts.
+ while (builtin_index < Natives::GetBuiltinsCount()) {
+ if (!Bootstrapper::CompileBuiltin(isolate(), builtin_index++)) return false;
}
if (!CallUtilsFunction(isolate(), "PostNatives")) return false;
- InstallNativeFunctions();
-
auto function_cache =
- ObjectHashTable::New(isolate(), ApiNatives::kInitialFunctionCacheSize);
+ ObjectHashTable::New(isolate(), ApiNatives::kInitialFunctionCacheSize,
+ USE_CUSTOM_MINIMUM_CAPACITY);
native_context()->set_function_cache(*function_cache);
// Store the map for the string prototype after the natives has been compiled
@@ -2356,6 +2446,9 @@ bool Genesis::InstallNatives() {
Handle<JSFunction> apply =
InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
MaybeHandle<JSObject>(), Builtins::kFunctionApply);
+ Handle<TypeFeedbackVector> feedback_vector =
+ TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate());
+ apply->shared()->set_feedback_vector(*feedback_vector);
// Make sure that Function.prototype.call appears to be compiled.
// The code will never be called, but inline caching for call will
@@ -2422,8 +2515,7 @@ bool Genesis::InstallNatives() {
initial_map->AppendDescriptor(&input_field);
}
- initial_map->set_inobject_properties(2);
- initial_map->set_pre_allocated_property_fields(2);
+ initial_map->SetInObjectProperties(2);
initial_map->set_unused_property_fields(0);
native_context()->set_regexp_result_map(*initial_map);
@@ -2485,10 +2577,11 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_tostring_natives[] = {"native harmony-tostring.js",
nullptr};
static const char* harmony_sloppy_natives[] = {nullptr};
- static const char* harmony_unicode_natives[] = {nullptr};
+ static const char* harmony_sloppy_function_natives[] = {nullptr};
+ static const char* harmony_sloppy_let_natives[] = {nullptr};
static const char* harmony_unicode_regexps_natives[] = {nullptr};
- static const char* harmony_computed_property_names_natives[] = {nullptr};
static const char* harmony_rest_parameters_natives[] = {nullptr};
+ static const char* harmony_default_parameters_natives[] = {nullptr};
static const char* harmony_reflect_natives[] = {"native harmony-reflect.js",
nullptr};
static const char* harmony_spreadcalls_natives[] = {
@@ -2496,6 +2589,8 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_destructuring_natives[] = {nullptr};
static const char* harmony_object_natives[] = {"native harmony-object.js",
NULL};
+ static const char* harmony_object_observe_natives[] = {
+ "native harmony-object-observe.js", nullptr};
static const char* harmony_spread_arrays_natives[] = {nullptr};
static const char* harmony_sharedarraybuffer_natives[] = {
"native harmony-sharedarraybuffer.js", NULL};
@@ -2504,6 +2599,8 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_new_target_natives[] = {nullptr};
static const char* harmony_concat_spreadable_natives[] = {
"native harmony-concat-spreadable.js", nullptr};
+ static const char* harmony_simd_natives[] = {"native harmony-simd.js",
+ nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -2513,7 +2610,9 @@ bool Genesis::InstallExperimentalNatives() {
Vector<const char> script_name = ExperimentalNatives::GetScriptName(i); \
if (strncmp(script_name.start(), id##_natives[j], \
script_name.length()) == 0) { \
- if (!CompileExperimentalBuiltin(isolate(), i)) return false; \
+ if (!Bootstrapper::CompileExperimentalBuiltin(isolate(), i)) { \
+ return false; \
+ } \
} \
} \
}
@@ -2525,16 +2624,41 @@ bool Genesis::InstallExperimentalNatives() {
if (!CallUtilsFunction(isolate(), "PostExperimentals")) return false;
- InstallExperimentalNativeFunctions();
InstallExperimentalBuiltinFunctionIds();
return true;
}
bool Genesis::InstallExtraNatives() {
+ HandleScope scope(isolate());
+
+ Handle<JSObject> extras_binding =
+ factory()->NewJSObject(isolate()->object_function());
+ JSObject::NormalizeProperties(extras_binding, CLEAR_INOBJECT_PROPERTIES, 2,
+ "container for binding to/from extra natives");
+ native_context()->set_extras_binding_object(*extras_binding);
+
for (int i = ExtraNatives::GetDebuggerCount();
i < ExtraNatives::GetBuiltinsCount(); i++) {
- if (!CompileExtraBuiltin(isolate(), i)) return false;
+ if (!Bootstrapper::CompileExtraBuiltin(isolate(), i)) return false;
+ }
+
+ return true;
+}
+
+
+bool Genesis::InstallDebuggerNatives() {
+ for (int i = 0; i < Natives::GetDebuggerCount(); ++i) {
+ if (!Bootstrapper::CompileBuiltin(isolate(), i)) return false;
+ }
+ return CallUtilsFunction(isolate(), "PostDebug");
+}
+
+
+bool Bootstrapper::InstallCodeStubNatives(Isolate* isolate) {
+ for (int i = CodeStubNatives::GetDebuggerCount();
+ i < CodeStubNatives::GetBuiltinsCount(); i++) {
+ if (!CompileCodeStubBuiltin(isolate, i)) return false;
}
return true;
@@ -2599,50 +2723,6 @@ void Genesis::InstallExperimentalBuiltinFunctionIds() {
#undef INSTALL_BUILTIN_ID
-// Do not forget to update macros.py with named constant
-// of cache id.
-#define JSFUNCTION_RESULT_CACHE_LIST(F) \
- F(16, native_context()->regexp_function())
-
-
-static FixedArray* CreateCache(int size, Handle<JSFunction> factory_function) {
- Factory* factory = factory_function->GetIsolate()->factory();
- // Caches are supposed to live for a long time, allocate in old space.
- int array_size = JSFunctionResultCache::kEntriesIndex + 2 * size;
- // Cannot use cast as object is not fully initialized yet.
- JSFunctionResultCache* cache = reinterpret_cast<JSFunctionResultCache*>(
- *factory->NewFixedArrayWithHoles(array_size, TENURED));
- cache->set(JSFunctionResultCache::kFactoryIndex, *factory_function);
- cache->MakeZeroSize();
- return cache;
-}
-
-
-void Genesis::InstallJSFunctionResultCaches() {
- const int kNumberOfCaches = 0 +
-#define F(size, func) + 1
- JSFUNCTION_RESULT_CACHE_LIST(F)
-#undef F
- ;
-
- Handle<FixedArray> caches =
- factory()->NewFixedArray(kNumberOfCaches, TENURED);
-
- int index = 0;
-
-#define F(size, func) do { \
- FixedArray* cache = CreateCache((size), Handle<JSFunction>(func)); \
- caches->set(index++, cache); \
- } while (false)
-
- JSFUNCTION_RESULT_CACHE_LIST(F);
-
-#undef F
-
- native_context()->set_jsfunction_result_caches(*caches);
-}
-
-
void Genesis::InitializeNormalizedMapCaches() {
Handle<NormalizedMapCache> cache = NormalizedMapCache::New(isolate());
native_context()->set_normalized_map_cache(*cache);
@@ -2676,9 +2756,6 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate);
JSObject::AddProperty(Error, name, stack_trace_limit, NONE);
- // By now the utils object is useless and can be removed.
- native_context->set_natives_utils_object(*factory->undefined_value());
-
// Expose the natives in global if a name for it is specified.
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
Handle<String> natives_key =
@@ -2698,6 +2775,15 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
factory->stack_trace_symbol(), NONE),
false);
+ // Expose the internal error symbol to native JS
+ RETURN_ON_EXCEPTION_VALUE(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ handle(native_context->builtins(), isolate),
+ factory->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("$internalErrorSymbol")),
+ factory->internal_error_symbol(), NONE),
+ false);
+
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
// If loading fails we just bail out without installing the
@@ -2847,19 +2933,6 @@ bool Genesis::InstallExtension(Isolate* isolate,
}
-bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
- HandleScope scope(isolate());
- for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
- Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
- Handle<Object> function_object = Object::GetProperty(
- isolate(), builtins, Builtins::GetName(id)).ToHandleChecked();
- Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- builtins->set_javascript_builtin(id, *function);
- }
- return true;
-}
-
-
bool Genesis::ConfigureGlobalObjects(
v8::Local<v8::ObjectTemplate> global_proxy_template) {
Handle<JSObject> global_proxy(
@@ -3083,7 +3156,8 @@ class NoTrackDoubleFieldsForSerializerScope {
Genesis::Genesis(Isolate* isolate,
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
- v8::ExtensionConfiguration* extensions)
+ v8::ExtensionConfiguration* extensions,
+ ContextType context_type)
: isolate_(isolate), active_(isolate->bootstrapper()) {
NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
result_ = Handle<Context>::null();
@@ -3151,29 +3225,43 @@ Genesis::Genesis(Isolate* isolate,
Handle<GlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
HookUpGlobalProxy(global_object, global_proxy);
- InitializeGlobal(global_object, empty_function);
- InstallJSFunctionResultCaches();
+ InitializeGlobal(global_object, empty_function, context_type);
InitializeNormalizedMapCaches();
- if (!InstallNatives()) return;
+
+ if (!InstallNatives(context_type)) return;
MakeFunctionInstancePrototypeWritable();
- if (!ConfigureGlobalObjects(global_proxy_template)) return;
+ if (context_type != THIN_CONTEXT) {
+ if (!InstallExtraNatives()) return;
+ if (!ConfigureGlobalObjects(global_proxy_template)) return;
+ }
isolate->counters()->contexts_created_from_scratch()->Increment();
}
- // Install experimental and extra natives. Do not include them into the
+ // Install experimental natives. Do not include them into the
// snapshot as we should be able to turn them off at runtime. Re-installing
// them after they have already been deserialized would also fail.
- if (!isolate->serializer_enabled()) {
+ if (context_type == FULL_CONTEXT) {
+ if (!isolate->serializer_enabled()) {
+ InitializeExperimentalGlobal();
+ if (!InstallExperimentalNatives()) return;
+ // By now the utils object is useless and can be removed.
+ native_context()->set_natives_utils_object(
+ isolate->heap()->undefined_value());
+ }
+ // The serializer cannot serialize typed arrays. Reset those typed arrays
+ // for each new context.
+ InitializeBuiltinTypedArrays();
+ } else if (context_type == DEBUG_CONTEXT) {
+ DCHECK(!isolate->serializer_enabled());
InitializeExperimentalGlobal();
- if (!InstallExperimentalNatives()) return;
- if (!InstallExtraNatives()) return;
+ if (!InstallDebuggerNatives()) return;
}
- // The serializer cannot serialize typed arrays. Reset those typed arrays
- // for each new context.
- InitializeBuiltinTypedArrays();
+ // Check that the script context table is empty except for the 'this' binding.
+ // We do not need script contexts for native scripts.
+ DCHECK_EQ(1, native_context()->script_context_table()->used());
result_ = native_context();
}
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index cb0d918b17..e478681431 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -61,6 +61,7 @@ class SourceCodeCache final BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
};
+enum ContextType { FULL_CONTEXT, THIN_CONTEXT, DEBUG_CONTEXT };
// The Boostrapper is the public interface for creating a JavaScript global
// context.
@@ -78,7 +79,10 @@ class Bootstrapper final {
Handle<Context> CreateEnvironment(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_object_template,
- v8::ExtensionConfiguration* extensions);
+ v8::ExtensionConfiguration* extensions,
+ ContextType context_type = FULL_CONTEXT);
+
+ bool CreateCodeStubContext(Isolate* isolate);
// Detach the environment from its outer global object.
void DetachGlobal(Handle<Context> env);
@@ -105,6 +109,20 @@ class Bootstrapper final {
SourceCodeCache* extensions_cache() { return &extensions_cache_; }
+ static bool CompileNative(Isolate* isolate, Vector<const char> name,
+ Handle<JSObject> receiver, Handle<String> source,
+ int argc, Handle<Object> argv[]);
+ static bool CompileBuiltin(Isolate* isolate, int index);
+ static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
+ static bool CompileExtraBuiltin(Isolate* isolate, int index);
+ static bool CompileCodeStubBuiltin(Isolate* isolate, int index);
+ static bool InstallCodeStubNatives(Isolate* isolate);
+
+ static void ImportNatives(Isolate* isolate, Handle<JSObject> container);
+ static void ImportExperimentalNatives(Isolate* isolate,
+ Handle<JSObject> container);
+ static bool InstallJSBuiltins(Isolate* isolate, Handle<JSObject> container);
+
private:
Isolate* isolate_;
typedef int NestingCounterType;
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 869ad2cc00..aa7268f6b6 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -12,8 +12,8 @@
#include "src/builtins.h"
#include "src/cpu-profiler.h"
#include "src/elements.h"
+#include "src/frames-inl.h"
#include "src/gdb-jit.h"
-#include "src/heap/mark-compact.h"
#include "src/heap-profiler.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@@ -262,7 +262,7 @@ static inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
ElementsKind target_kind = origin_kind;
{
DisallowHeapAllocation no_gc;
- int arg_count = args->length() - first_added_arg;
+ int arg_count = args_length - first_added_arg;
Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
for (int i = 0; i < arg_count; i++) {
Object* arg = arguments[i];
@@ -321,104 +321,22 @@ BUILTIN(ArrayPush) {
if (!maybe_elms_obj.ToHandle(&elms_obj)) {
return CallJsBuiltin(isolate, "$arrayPush", args);
}
-
+ // Fast Elements Path
+ int push_size = args.length() - 1;
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
int len = Smi::cast(array->length())->value();
- int to_add = args.length() - 1;
- if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) {
+ if (push_size == 0) {
+ return Smi::FromInt(len);
+ }
+ if (push_size > 0 &&
+ JSArray::WouldChangeReadOnlyLength(array, len + push_size)) {
return CallJsBuiltin(isolate, "$arrayPush", args);
}
DCHECK(!array->map()->is_observed());
-
- ElementsKind kind = array->GetElementsKind();
-
- if (IsFastSmiOrObjectElementsKind(kind)) {
- Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
- if (to_add == 0) {
- return Smi::FromInt(len);
- }
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- DCHECK(to_add <= (Smi::kMaxValue - len));
-
- int new_length = len + to_add;
-
- if (new_length > elms->length()) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- Handle<FixedArray> new_elms =
- isolate->factory()->NewUninitializedFixedArray(capacity);
-
- ElementsAccessor* accessor = array->GetElementsAccessor();
- accessor->CopyElements(
- elms_obj, 0, kind, new_elms, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole);
-
- elms = new_elms;
- }
-
- // Add the provided values.
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int index = 0; index < to_add; index++) {
- elms->set(index + len, args[index + 1], mode);
- }
-
- if (*elms != array->elements()) {
- array->set_elements(*elms);
- }
-
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
- return Smi::FromInt(new_length);
- } else {
- int elms_len = elms_obj->length();
- if (to_add == 0) {
- return Smi::FromInt(len);
- }
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- DCHECK(to_add <= (Smi::kMaxValue - len));
-
- int new_length = len + to_add;
-
- Handle<FixedDoubleArray> new_elms;
-
- if (new_length > elms_len) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- // Create new backing store; since capacity > 0, we can
- // safely cast to FixedDoubleArray.
- new_elms = Handle<FixedDoubleArray>::cast(
- isolate->factory()->NewFixedDoubleArray(capacity));
-
- ElementsAccessor* accessor = array->GetElementsAccessor();
- accessor->CopyElements(
- elms_obj, 0, kind, new_elms, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole);
-
- } else {
- // to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
- // empty_fixed_array.
- new_elms = Handle<FixedDoubleArray>::cast(elms_obj);
- }
-
- // Add the provided values.
- DisallowHeapAllocation no_gc;
- int index;
- for (index = 0; index < to_add; index++) {
- Object* arg = args[index + 1];
- new_elms->set(index + len, arg->Number());
- }
-
- if (*new_elms != array->elements()) {
- array->set_elements(*new_elms);
- }
-
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
- return Smi::FromInt(new_length);
- }
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ int new_length = accessor->Push(array, elms_obj, &args[1], push_size,
+ ElementsAccessor::kDirectionReverse);
+ return Smi::FromInt(new_length);
}
@@ -503,7 +421,6 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
HandleScope scope(isolate);
- Heap* heap = isolate->heap();
Handle<Object> receiver = args.receiver();
MaybeHandle<FixedArrayBase> maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
@@ -545,6 +462,7 @@ BUILTIN(ArrayUnshift) {
array->set_elements(*elms);
} else {
DisallowHeapAllocation no_gc;
+ Heap* heap = isolate->heap();
heap->MoveElements(*elms, to_add, 0, len);
}
@@ -1369,34 +1287,15 @@ static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
}
-static void Generate_CallICStub_DebugBreak(MacroAssembler* masm) {
- DebugCodegen::GenerateCallICStubDebugBreak(masm);
-}
-
-
static void Generate_Return_DebugBreak(MacroAssembler* masm) {
- DebugCodegen::GenerateReturnDebugBreak(masm);
-}
-
-
-static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) {
- DebugCodegen::GenerateCallFunctionStubDebugBreak(masm);
-}
-
-
-static void Generate_CallConstructStub_DebugBreak(MacroAssembler* masm) {
- DebugCodegen::GenerateCallConstructStubDebugBreak(masm);
-}
-
-
-static void Generate_CallConstructStub_Recording_DebugBreak(
- MacroAssembler* masm) {
- DebugCodegen::GenerateCallConstructStubRecordDebugBreak(masm);
+ DebugCodegen::GenerateDebugBreakStub(masm,
+ DebugCodegen::SAVE_RESULT_REGISTER);
}
static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
- DebugCodegen::GenerateSlotDebugBreak(masm);
+ DebugCodegen::GenerateDebugBreakStub(masm,
+ DebugCodegen::IGNORE_RESULT_REGISTER);
}
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index e1e202b3f7..cf90aacf89 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -70,9 +70,10 @@ enum BuiltinExtraArguments {
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSConstructStubForDerived, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubNewTarget, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState) \
@@ -140,28 +141,15 @@ enum BuiltinExtraArguments {
V(StoreIC_Normal, STORE_IC)
// Define list of builtins used by the debugger implemented in assembly.
-#define BUILTIN_LIST_DEBUG_A(V) \
- V(Return_DebugBreak, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(CallConstructStub_DebugBreak, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(CallConstructStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(CallICStub_DebugBreak, CALL_IC, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK) \
- V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, \
- DEBUG_BREAK)
+#define BUILTIN_LIST_DEBUG_A(V) \
+ V(Return_DebugBreak, BUILTIN, DEBUG_STUB, kNoExtraICState) \
+ V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, kNoExtraICState) \
+ V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, kNoExtraICState) \
+ V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, kNoExtraICState)
// Define list of builtins implemented in JavaScript.
#define BUILTINS_LIST_JS(V) \
V(EQUALS, 1) \
- V(STRICT_EQUALS, 1) \
V(COMPARE, 2) \
V(COMPARE_STRONG, 2) \
V(ADD, 1) \
@@ -186,21 +174,17 @@ enum BuiltinExtraArguments {
V(SAR_STRONG, 1) \
V(SHR, 1) \
V(SHR_STRONG, 1) \
- V(DELETE, 2) \
V(IN, 1) \
V(INSTANCE_OF, 1) \
V(CALL_NON_FUNCTION, 0) \
V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
V(CALL_FUNCTION_PROXY, 1) \
V(CALL_FUNCTION_PROXY_AS_CONSTRUCTOR, 1) \
- V(TO_OBJECT, 0) \
V(TO_NUMBER, 0) \
V(TO_STRING, 0) \
V(TO_NAME, 0) \
V(STRING_ADD_LEFT, 1) \
- V(STRING_ADD_LEFT_STRONG, 1) \
V(STRING_ADD_RIGHT, 1) \
- V(STRING_ADD_RIGHT_STRONG, 1) \
V(APPLY_PREPARE, 1) \
V(REFLECT_APPLY_PREPARE, 1) \
V(REFLECT_CONSTRUCT_PREPARE, 2) \
@@ -317,9 +301,10 @@ class Builtins {
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSConstructStubForDerived(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
- static void Generate_JSConstructStubNewTarget(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
+ static void Generate_InterpreterEntryTrampoline(MacroAssembler* masm);
+ static void Generate_InterpreterExitTrampoline(MacroAssembler* masm);
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc
index 5bd0884eba..52fff7e145 100644
--- a/deps/v8/src/cached-powers.cc
+++ b/deps/v8/src/cached-powers.cc
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/cached-powers.h"
+
#include <limits.h>
#include <stdarg.h>
#include <stdint.h>
#include <cmath>
#include "src/base/logging.h"
-#include "src/cached-powers.h"
#include "src/globals.h"
namespace v8 {
diff --git a/deps/v8/src/cancelable-task.cc b/deps/v8/src/cancelable-task.cc
new file mode 100644
index 0000000000..32d5057c49
--- /dev/null
+++ b/deps/v8/src/cancelable-task.cc
@@ -0,0 +1,28 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/cancelable-task.h"
+
+#include "src/base/platform/platform.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+Cancelable::Cancelable(Isolate* isolate)
+ : isolate_(isolate), is_cancelled_(false) {
+ isolate->RegisterCancelableTask(this);
+}
+
+
+Cancelable::~Cancelable() {
+ if (!is_cancelled_) {
+ isolate_->RemoveCancelableTask(this);
+ }
+}
+
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/cancelable-task.h b/deps/v8/src/cancelable-task.h
new file mode 100644
index 0000000000..bae5b580cd
--- /dev/null
+++ b/deps/v8/src/cancelable-task.h
@@ -0,0 +1,74 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CANCELABLE_TASK_H_
+#define V8_CANCELABLE_TASK_H_
+
+#include "include/v8-platform.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+
+class Cancelable {
+ public:
+ explicit Cancelable(Isolate* isolate);
+ virtual ~Cancelable();
+
+ virtual void Cancel() { is_cancelled_ = true; }
+
+ protected:
+ Isolate* isolate_;
+ bool is_cancelled_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Cancelable);
+};
+
+
+// Multiple inheritance can be used because Task is a pure interface.
+class CancelableTask : public Cancelable, public Task {
+ public:
+ explicit CancelableTask(Isolate* isolate) : Cancelable(isolate) {}
+
+ // Task overrides.
+ void Run() final {
+ if (!is_cancelled_) {
+ RunInternal();
+ }
+ }
+
+ virtual void RunInternal() = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CancelableTask);
+};
+
+
+// Multiple inheritance can be used because IdleTask is a pure interface.
+class CancelableIdleTask : public Cancelable, public IdleTask {
+ public:
+ explicit CancelableIdleTask(Isolate* isolate) : Cancelable(isolate) {}
+
+ // IdleTask overrides.
+ void Run(double deadline_in_seconds) final {
+ if (!is_cancelled_) {
+ RunInternal(deadline_in_seconds);
+ }
+ }
+
+ virtual void RunInternal(double deadline_in_seconds) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CancelableIdleTask);
+};
+
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CANCELABLE_TASK_H_
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index c68ad74b6a..89f417196e 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -13,6 +13,7 @@ namespace internal {
// Unicode character predicates as defined by ECMA-262, 3rd,
// used for lexical analysis.
+inline int AsciiAlphaToLower(uc32 c);
inline bool IsCarriageReturn(uc32 c);
inline bool IsLineFeed(uc32 c);
inline bool IsAsciiIdentifier(uc32 c);
diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc
deleted file mode 100644
index 2871a66c64..0000000000
--- a/deps/v8/src/checks.cc
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/checks.h"
-
-namespace v8 {
-namespace internal {} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index bdceb572a1..f6a8014b2e 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/code-factory.h"
#include "src/bootstrapper.h"
-#include "src/code-factory.h"
#include "src/ic/ic.h"
namespace v8 {
@@ -13,21 +12,21 @@ namespace internal {
// static
-Callable CodeFactory::LoadIC(Isolate* isolate, ContextualMode mode,
+Callable CodeFactory::LoadIC(Isolate* isolate, TypeofMode typeof_mode,
LanguageMode language_mode) {
return Callable(
LoadIC::initialize_stub(
- isolate, LoadICState(mode, language_mode).GetExtraICState()),
+ isolate, LoadICState(typeof_mode, language_mode).GetExtraICState()),
LoadDescriptor(isolate));
}
// static
Callable CodeFactory::LoadICInOptimizedCode(
- Isolate* isolate, ContextualMode mode, LanguageMode language_mode,
+ Isolate* isolate, TypeofMode typeof_mode, LanguageMode language_mode,
InlineCacheState initialization_state) {
auto code = LoadIC::initialize_stub_in_optimized_code(
- isolate, LoadICState(mode, language_mode).GetExtraICState(),
+ isolate, LoadICState(typeof_mode, language_mode).GetExtraICState(),
initialization_state);
return Callable(code, LoadWithVectorDescriptor(isolate));
}
@@ -139,6 +138,21 @@ Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op,
// static
+Callable CodeFactory::LoadGlobalViaContext(Isolate* isolate, int depth) {
+ LoadGlobalViaContextStub stub(isolate, depth);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::StoreGlobalViaContext(Isolate* isolate, int depth,
+ LanguageMode language_mode) {
+ StoreGlobalViaContextStub stub(isolate, depth, language_mode);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::Instanceof(Isolate* isolate,
InstanceofStub::Flags flags) {
InstanceofStub stub(isolate, flags);
@@ -163,6 +177,13 @@ Callable CodeFactory::ToNumber(Isolate* isolate) {
// static
+Callable CodeFactory::ToObject(Isolate* isolate) {
+ ToObjectStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag) {
StringAddStub stub(isolate, flags, pretenure_flag);
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 947770db42..1386f054bb 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -32,9 +32,10 @@ class Callable final BASE_EMBEDDED {
class CodeFactory final {
public:
// Initial states for ICs.
- static Callable LoadIC(Isolate* isolate, ContextualMode mode,
+ static Callable LoadIC(Isolate* isolate, TypeofMode typeof_mode,
LanguageMode language_mode);
- static Callable LoadICInOptimizedCode(Isolate* isolate, ContextualMode mode,
+ static Callable LoadICInOptimizedCode(Isolate* isolate,
+ TypeofMode typeof_mode,
LanguageMode language_mode,
InlineCacheState initialization_state);
static Callable KeyedLoadIC(Isolate* isolate, LanguageMode language_mode);
@@ -61,6 +62,10 @@ class CodeFactory final {
// Code stubs. Add methods here as needed to reduce dependency on
// code-stubs.h.
+ static Callable LoadGlobalViaContext(Isolate* isolate, int depth);
+ static Callable StoreGlobalViaContext(Isolate* isolate, int depth,
+ LanguageMode language_mode);
+
static Callable Instanceof(Isolate* isolate, InstanceofStub::Flags flags);
static Callable ToBoolean(
@@ -68,6 +73,7 @@ class CodeFactory final {
ToBooleanStub::Types types = ToBooleanStub::Types());
static Callable ToNumber(Isolate* isolate);
+ static Callable ToObject(Isolate* isolate);
static Callable StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag);
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index d1cabde1bd..81304e5002 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -48,7 +48,8 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
protected:
virtual HValue* BuildCodeStub() = 0;
- int GetParameterCount() const {
+ int GetParameterCount() const { return descriptor_.GetParameterCount(); }
+ int GetRegisterParameterCount() const {
return descriptor_.GetRegisterParameterCount();
}
HParameter* GetParameter(int parameter) {
@@ -118,7 +119,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
ElementsKind kind);
- SmartArrayPointer<HParameter*> parameters_;
+ base::SmartArrayPointer<HParameter*> parameters_;
HValue* arguments_length_;
CompilationInfo* info_;
CodeStubDescriptor descriptor_;
@@ -138,6 +139,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
}
int param_count = GetParameterCount();
+ int register_param_count = GetRegisterParameterCount();
HEnvironment* start_environment = graph()->start_environment();
HBasicBlock* next_block = CreateBasicBlock(start_environment);
Goto(next_block);
@@ -148,11 +150,16 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
HInstruction* stack_parameter_count = NULL;
for (int i = 0; i < param_count; ++i) {
Representation r = GetParameterRepresentation(i);
- HParameter* param = Add<HParameter>(i,
- HParameter::REGISTER_PARAMETER, r);
+ HParameter* param;
+ if (i >= register_param_count) {
+ param = Add<HParameter>(i - register_param_count,
+ HParameter::STACK_PARAMETER, r);
+ } else {
+ param = Add<HParameter>(i, HParameter::REGISTER_PARAMETER, r);
+ }
start_environment->Bind(i, param);
parameters_[i] = param;
- if (IsParameterCountRegister(i)) {
+ if (i < register_param_count && IsParameterCountRegister(i)) {
param->set_type(HType::Smi());
stack_parameter_count = param;
arguments_length_ = stack_parameter_count;
@@ -161,7 +168,9 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
DCHECK(!runtime_stack_params || arguments_length_ != NULL);
if (!runtime_stack_params) {
- stack_parameter_count = graph()->GetConstantMinus1();
+ stack_parameter_count =
+ Add<HConstant>(param_count - register_param_count - 1);
+ // graph()->GetConstantMinus1();
arguments_length_ = graph()->GetConstant0();
}
@@ -313,7 +322,6 @@ Handle<Code> NumberToStringStub::GenerateCode() {
// Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
-// Possible optimizations: put the type string into the oddballs.
template <>
HValue* CodeStubGraphBuilder<TypeofStub>::BuildCodeStub() {
Factory* factory = isolate()->factory();
@@ -332,7 +340,6 @@ HValue* CodeStubGraphBuilder<TypeofStub>::BuildCodeStub() {
{ Push(number_string); }
is_number.Else();
{
- HConstant* undefined_string = Add<HConstant>(factory->undefined_string());
HValue* map = AddLoadMap(object, smi_check);
HValue* instance_type = Add<HLoadNamedField>(
map, nullptr, HObjectAccess::ForMapInstanceType());
@@ -349,24 +356,8 @@ HValue* CodeStubGraphBuilder<TypeofStub>::BuildCodeStub() {
instance_type, Add<HConstant>(ODDBALL_TYPE), Token::EQ);
is_oddball.Then();
{
- IfBuilder is_true_or_false(this);
- is_true_or_false.If<HCompareObjectEqAndBranch>(
- object, graph()->GetConstantTrue());
- is_true_or_false.OrIf<HCompareObjectEqAndBranch>(
- object, graph()->GetConstantFalse());
- is_true_or_false.Then();
- { Push(Add<HConstant>(factory->boolean_string())); }
- is_true_or_false.Else();
- {
- IfBuilder is_null(this);
- is_null.If<HCompareObjectEqAndBranch>(object,
- graph()->GetConstantNull());
- is_null.Then();
- { Push(object_string); }
- is_null.Else();
- { Push(undefined_string); }
- }
- is_true_or_false.End();
+ Push(Add<HLoadNamedField>(object, nullptr,
+ HObjectAccess::ForOddballTypeOf()));
}
is_oddball.Else();
{
@@ -389,13 +380,22 @@ HValue* CodeStubGraphBuilder<TypeofStub>::BuildCodeStub() {
{ Push(Add<HConstant>(factory->function_string())); }
is_function.Else();
{
+#define SIMD128_BUILDER_OPEN(TYPE, Type, type, lane_count, lane_type) \
+ IfBuilder is_##type(this); \
+ is_##type.If<HCompareObjectEqAndBranch>( \
+ map, Add<HConstant>(factory->type##_map())); \
+ is_##type.Then(); \
+ { Push(Add<HConstant>(factory->type##_string())); } \
+ is_##type.Else(); {
+ SIMD128_TYPES(SIMD128_BUILDER_OPEN)
+#undef SIMD128_BUILDER_OPEN
// Is it an undetectable object?
IfBuilder is_undetectable(this);
is_undetectable.If<HIsUndetectableAndBranch>(object);
is_undetectable.Then();
{
// typeof an undetectable object is 'undefined'.
- Push(undefined_string);
+ Push(Add<HConstant>(factory->undefined_string()));
}
is_undetectable.Else();
{
@@ -403,6 +403,9 @@ HValue* CodeStubGraphBuilder<TypeofStub>::BuildCodeStub() {
// host objects gives that it is okay to return "object".
Push(object_string);
}
+#define SIMD128_BUILDER_CLOSE(TYPE, Type, type, lane_count, lane_type) }
+ SIMD128_TYPES(SIMD128_BUILDER_CLOSE)
+#undef SIMD128_BUILDER_CLOSE
}
is_function.End();
}
@@ -1605,12 +1608,12 @@ Handle<Code> StoreGlobalStub::GenerateCode() {
}
-template<>
+template <>
HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
- HValue* value = GetParameter(ElementsTransitionAndStoreStub::kValueIndex);
- HValue* map = GetParameter(ElementsTransitionAndStoreStub::kMapIndex);
- HValue* key = GetParameter(ElementsTransitionAndStoreStub::kKeyIndex);
- HValue* object = GetParameter(ElementsTransitionAndStoreStub::kObjectIndex);
+ HValue* object = GetParameter(StoreTransitionDescriptor::kReceiverIndex);
+ HValue* key = GetParameter(StoreTransitionDescriptor::kNameIndex);
+ HValue* value = GetParameter(StoreTransitionDescriptor::kValueIndex);
+ HValue* map = GetParameter(StoreTransitionDescriptor::kMapIndex);
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
@@ -1640,6 +1643,16 @@ Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
}
+template <>
+HValue* CodeStubGraphBuilder<ToObjectStub>::BuildCodeStub() {
+ HValue* receiver = GetParameter(ToObjectDescriptor::kReceiverIndex);
+ return BuildToObject(receiver);
+}
+
+
+Handle<Code> ToObjectStub::GenerateCode() { return DoGenerateCode(this); }
+
+
void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
HValue* js_function,
HValue* native_context,
@@ -1743,71 +1756,59 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
is_optimized.Else();
{
AddIncrementCounter(counters->fast_new_closure_try_optimized());
- // optimized_map points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // Map must never be empty, so check the first elements.
+ // The {optimized_map} points to fixed array of 4-element entries:
+ // (native context, optimized code, literals, ast-id).
+ // Iterate through the {optimized_map} backwards. After the loop, if no
+ // matching optimized code was found, install unoptimized code.
+ // for(i = map.length() - SharedFunctionInfo::kEntryLength;
+ // i >= SharedFunctionInfo::kEntriesStart;
+ // i -= SharedFunctionInfo::kEntryLength) { ... }
HValue* first_entry_index =
Add<HConstant>(SharedFunctionInfo::kEntriesStart);
- IfBuilder already_in(this);
- BuildCheckAndInstallOptimizedCode(js_function, native_context, &already_in,
- optimized_map, first_entry_index);
- already_in.Else();
+ HValue* shared_function_entry_length =
+ Add<HConstant>(SharedFunctionInfo::kEntryLength);
+ LoopBuilder loop_builder(this, context(), LoopBuilder::kPostDecrement,
+ shared_function_entry_length);
+ HValue* array_length = Add<HLoadNamedField>(
+ optimized_map, nullptr, HObjectAccess::ForFixedArrayLength());
+ HValue* start_pos =
+ AddUncasted<HSub>(array_length, shared_function_entry_length);
+ HValue* slot_iterator =
+ loop_builder.BeginBody(start_pos, first_entry_index, Token::GTE);
{
- // Iterate through the rest of map backwards. Do not double check first
- // entry. After the loop, if no matching optimized code was found,
- // install unoptimized code.
- // for(i = map.length() - SharedFunctionInfo::kEntryLength;
- // i > SharedFunctionInfo::kEntriesStart;
- // i -= SharedFunctionInfo::kEntryLength) { .. }
- HValue* shared_function_entry_length =
- Add<HConstant>(SharedFunctionInfo::kEntryLength);
- LoopBuilder loop_builder(this,
- context(),
- LoopBuilder::kPostDecrement,
- shared_function_entry_length);
- HValue* array_length = Add<HLoadNamedField>(
- optimized_map, nullptr, HObjectAccess::ForFixedArrayLength());
- HValue* start_pos = AddUncasted<HSub>(array_length,
- shared_function_entry_length);
- HValue* slot_iterator = loop_builder.BeginBody(start_pos,
- first_entry_index,
- Token::GT);
+ IfBuilder done_check(this);
+ BuildCheckAndInstallOptimizedCode(js_function, native_context,
+ &done_check, optimized_map,
+ slot_iterator);
+ // Fall out of the loop
+ loop_builder.Break();
+ }
+ loop_builder.EndBody();
+
+ // If {slot_iterator} is less than the first entry index, then we failed to
+ // find a context-dependent code and try context-independent code next.
+ IfBuilder no_optimized_code_check(this);
+ no_optimized_code_check.If<HCompareNumericAndBranch>(
+ slot_iterator, first_entry_index, Token::LT);
+ no_optimized_code_check.Then();
+ {
+ IfBuilder shared_code_check(this);
+ HValue* shared_code =
+ Add<HLoadNamedField>(optimized_map, nullptr,
+ HObjectAccess::ForOptimizedCodeMapSharedCode());
+ shared_code_check.IfNot<HCompareObjectEqAndBranch>(
+ shared_code, graph()->GetConstantUndefined());
+ shared_code_check.Then();
{
- IfBuilder done_check(this);
- BuildCheckAndInstallOptimizedCode(js_function, native_context,
- &done_check,
- optimized_map,
- slot_iterator);
- // Fall out of the loop
- loop_builder.Break();
+ // Store the context-independent optimized code.
+ HValue* literals = Add<HConstant>(factory->empty_fixed_array());
+ BuildInstallOptimizedCode(js_function, native_context, shared_code,
+ literals);
}
- loop_builder.EndBody();
-
- // If slot_iterator equals first entry index, then we failed to find a
- // context-dependent code and try context-independent code next.
- IfBuilder no_optimized_code_check(this);
- no_optimized_code_check.If<HCompareNumericAndBranch>(
- slot_iterator, first_entry_index, Token::EQ);
- no_optimized_code_check.Then();
+ shared_code_check.Else();
{
- IfBuilder shared_code_check(this);
- HValue* shared_code = Add<HLoadNamedField>(
- optimized_map, nullptr,
- HObjectAccess::ForOptimizedCodeMapSharedCode());
- shared_code_check.IfNot<HCompareObjectEqAndBranch>(
- shared_code, graph()->GetConstantUndefined());
- shared_code_check.Then();
- {
- // Store the context-independent optimized code.
- HValue* literals = Add<HConstant>(factory->empty_fixed_array());
- BuildInstallOptimizedCode(js_function, native_context, shared_code,
- literals);
- }
- shared_code_check.Else();
- {
- // Store the unoptimized code.
- BuildInstallCode(js_function, shared_info);
- }
+ // Store the unoptimized code.
+ BuildInstallCode(js_function, shared_info);
}
}
}
@@ -1982,13 +1983,6 @@ class CodeStubGraphBuilder<KeyedLoadGenericStub>
HValue* bit_field2,
ElementsKind kind);
- void BuildExternalElementLoad(HGraphBuilder::IfBuilder* if_builder,
- HValue* receiver,
- HValue* key,
- HValue* instance_type,
- HValue* bit_field2,
- ElementsKind kind);
-
KeyedLoadGenericStub* casted_stub() {
return static_cast<KeyedLoadGenericStub*>(stub());
}
@@ -2010,8 +2004,6 @@ void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildElementsKindLimitCheck(
void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildFastElementLoad(
HGraphBuilder::IfBuilder* if_builder, HValue* receiver, HValue* key,
HValue* instance_type, HValue* bit_field2, ElementsKind kind) {
- DCHECK(!IsExternalArrayElementsKind(kind));
-
BuildElementsKindLimitCheck(if_builder, bit_field2, kind);
IfBuilder js_array_check(this);
@@ -2031,20 +2023,6 @@ void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildFastElementLoad(
}
-void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildExternalElementLoad(
- HGraphBuilder::IfBuilder* if_builder, HValue* receiver, HValue* key,
- HValue* instance_type, HValue* bit_field2, ElementsKind kind) {
- DCHECK(IsExternalArrayElementsKind(kind));
-
- BuildElementsKindLimitCheck(if_builder, bit_field2, kind);
-
- Push(BuildUncheckedMonomorphicElementAccess(receiver, key, NULL,
- false, kind,
- LOAD, NEVER_RETURN_HOLE,
- STANDARD_STORE));
-}
-
-
HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
HValue* key = GetParameter(LoadDescriptor::kNameIndex);
@@ -2106,42 +2084,6 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
Deoptimizer::EAGER);
Push(graph()->GetConstant0());
- kind_if.Else();
- BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
- EXTERNAL_INT8_ELEMENTS);
-
- kind_if.Else();
- BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
- EXTERNAL_UINT8_ELEMENTS);
-
- kind_if.Else();
- BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
- EXTERNAL_INT16_ELEMENTS);
-
- kind_if.Else();
- BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
- EXTERNAL_UINT16_ELEMENTS);
-
- kind_if.Else();
- BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
- EXTERNAL_INT32_ELEMENTS);
-
- kind_if.Else();
- BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
- EXTERNAL_UINT32_ELEMENTS);
-
- kind_if.Else();
- BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
- EXTERNAL_FLOAT32_ELEMENTS);
-
- kind_if.Else();
- BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
- EXTERNAL_FLOAT64_ELEMENTS);
-
- kind_if.Else();
- BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
- EXTERNAL_UINT8_CLAMPED_ELEMENTS);
-
kind_if.ElseDeopt(
Deoptimizer::kElementsKindUnhandledInKeyedLoadGenericStub);
@@ -2229,7 +2171,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
index->ClearFlag(HValue::kCanOverflow);
HValue* property_index =
Add<HLoadKeyed>(cache_field_offsets, index, nullptr,
- EXTERNAL_INT32_ELEMENTS, NEVER_RETURN_HOLE, 0);
+ INT32_ELEMENTS, NEVER_RETURN_HOLE, 0);
Push(property_index);
}
lookup_if->Else();
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index bcc1fe8801..b48b828ae7 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -19,13 +19,18 @@ namespace v8 {
namespace internal {
+RUNTIME_FUNCTION(UnexpectedStubMiss) {
+ FATAL("Unexpected deopt of a stub");
+ return Smi::FromInt(0);
+}
+
+
CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
: call_descriptor_(stub->GetCallInterfaceDescriptor()),
stack_parameter_count_(no_reg),
hint_stack_parameter_count_(-1),
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
deoptimization_handler_(NULL),
- handler_arguments_mode_(DONT_PASS_ARGUMENTS),
miss_handler_(),
has_miss_handler_(false) {
stub->InitializeDescriptor(this);
@@ -37,7 +42,6 @@ CodeStubDescriptor::CodeStubDescriptor(Isolate* isolate, uint32_t stub_key)
hint_stack_parameter_count_(-1),
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
deoptimization_handler_(NULL),
- handler_arguments_mode_(DONT_PASS_ARGUMENTS),
miss_handler_(),
has_miss_handler_(false) {
CodeStub::InitializeDescriptor(isolate, stub_key, this);
@@ -56,11 +60,9 @@ void CodeStubDescriptor::Initialize(Address deoptimization_handler,
void CodeStubDescriptor::Initialize(Register stack_parameter_count,
Address deoptimization_handler,
int hint_stack_parameter_count,
- StubFunctionMode function_mode,
- HandlerArgumentsMode handler_mode) {
+ StubFunctionMode function_mode) {
Initialize(deoptimization_handler, hint_stack_parameter_count, function_mode);
stack_parameter_count_ = stack_parameter_count;
- handler_arguments_mode_ = handler_mode;
}
@@ -467,11 +469,8 @@ namespace {
Handle<JSFunction> GetFunction(Isolate* isolate, const char* name) {
v8::ExtensionConfiguration no_extensions;
- Handle<Context> ctx = isolate->bootstrapper()->CreateEnvironment(
- MaybeHandle<JSGlobalProxy>(), v8::Local<v8::ObjectTemplate>(),
- &no_extensions);
- Handle<JSBuiltinsObject> builtins = handle(ctx->builtins());
- MaybeHandle<Object> fun = Object::GetProperty(isolate, builtins, name);
+ MaybeHandle<Object> fun = Object::GetProperty(
+ isolate, isolate->factory()->code_stub_exports_object(), name);
Handle<JSFunction> function = Handle<JSFunction>::cast(fun.ToHandleChecked());
DCHECK(!function->IsUndefined() &&
"JavaScript implementation of stub not found");
@@ -502,6 +501,8 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
ParseInfo parse_info(&zone, inner);
CompilationInfo info(&parse_info);
info.SetFunctionType(GetCallInterfaceDescriptor().GetFunctionType());
+ info.MarkAsContextSpecializing();
+ info.MarkAsDeoptimizationEnabled();
info.SetStub(this);
return info.GenerateCodeStub();
}
@@ -620,7 +621,8 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
void LoadDictionaryElementStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
- descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
+ descriptor->Initialize(
+ FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
}
@@ -635,11 +637,13 @@ void KeyedLoadGenericStub::InitializeDescriptor(
void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
if (kind() == Code::STORE_IC) {
- descriptor->Initialize(FUNCTION_ADDR(StoreIC_MissFromStubFailure));
+ descriptor->Initialize(FUNCTION_ADDR(Runtime_StoreIC_MissFromStubFailure));
} else if (kind() == Code::KEYED_LOAD_IC) {
- descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
+ descriptor->Initialize(
+ FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
} else if (kind() == Code::KEYED_STORE_IC) {
- descriptor->Initialize(FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure));
+ descriptor->Initialize(
+ FUNCTION_ADDR(Runtime_KeyedStoreIC_MissFromStubFailure));
}
}
@@ -656,13 +660,20 @@ CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() const {
void StoreFastElementStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
- descriptor->Initialize(FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure));
+ descriptor->Initialize(
+ FUNCTION_ADDR(Runtime_KeyedStoreIC_MissFromStubFailure));
}
void ElementsTransitionAndStoreStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
- descriptor->Initialize(FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss));
+ descriptor->Initialize(
+ FUNCTION_ADDR(Runtime_ElementsTransitionAndStoreIC_Miss));
+}
+
+
+void ToObjectStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(Runtime::FunctionForId(Runtime::kToObject)->entry);
}
@@ -687,7 +698,7 @@ void TypeofStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {}
void NumberToStringStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
NumberToStringDescriptor call_descriptor(isolate());
descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry);
}
@@ -716,7 +727,7 @@ void CreateWeakCellStub::InitializeDescriptor(CodeStubDescriptor* d) {}
void RegExpConstructResultStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kRegExpConstructResultRT)->entry);
+ Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
}
@@ -735,34 +746,35 @@ void AllocateHeapNumberStub::InitializeDescriptor(
void CompareNilICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(FUNCTION_ADDR(CompareNilIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
+ descriptor->Initialize(FUNCTION_ADDR(Runtime_CompareNilIC_Miss));
+ descriptor->SetMissHandler(ExternalReference(
+ Runtime::FunctionForId(Runtime::kCompareNilIC_Miss), isolate()));
}
void ToBooleanStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(FUNCTION_ADDR(ToBooleanIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+ descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
+ descriptor->SetMissHandler(ExternalReference(
+ Runtime::FunctionForId(Runtime::kToBooleanIC_Miss), isolate()));
}
void BinaryOpICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(FUNCTION_ADDR(BinaryOpIC_Miss));
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+ descriptor->Initialize(FUNCTION_ADDR(Runtime_BinaryOpIC_Miss));
+ descriptor->SetMissHandler(ExternalReference(
+ Runtime::FunctionForId(Runtime::kBinaryOpIC_Miss), isolate()));
}
void BinaryOpWithAllocationSiteStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
- descriptor->Initialize(FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
+ descriptor->Initialize(
+ FUNCTION_ADDR(Runtime_BinaryOpIC_MissWithAllocationSite));
}
void StringAddStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(Runtime::FunctionForId(Runtime::kStringAddRT)->entry);
+ descriptor->Initialize(Runtime::FunctionForId(Runtime::kStringAdd)->entry);
}
@@ -800,7 +812,6 @@ void StoreElementStub::Generate(MacroAssembler* masm) {
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -950,6 +961,7 @@ std::ostream& operator<<(std::ostream& os, const ToBooleanStub::Types& s) {
if (s.Contains(ToBooleanStub::STRING)) p.Add("String");
if (s.Contains(ToBooleanStub::SYMBOL)) p.Add("Symbol");
if (s.Contains(ToBooleanStub::HEAP_NUMBER)) p.Add("HeapNumber");
+ if (s.Contains(ToBooleanStub::SIMD_VALUE)) p.Add("SimdValue");
return os << ")";
}
@@ -982,6 +994,9 @@ bool ToBooleanStub::Types::UpdateStatus(Handle<Object> object) {
Add(HEAP_NUMBER);
double value = HeapNumber::cast(*object)->value();
return value != 0 && !std::isnan(value);
+ } else if (object->IsSimd128Value()) {
+ Add(SIMD_VALUE);
+ return true;
} else {
// We should never see an internal object at runtime here!
UNREACHABLE();
@@ -991,16 +1006,10 @@ bool ToBooleanStub::Types::UpdateStatus(Handle<Object> object) {
bool ToBooleanStub::Types::NeedsMap() const {
- return Contains(ToBooleanStub::SPEC_OBJECT)
- || Contains(ToBooleanStub::STRING)
- || Contains(ToBooleanStub::SYMBOL)
- || Contains(ToBooleanStub::HEAP_NUMBER);
-}
-
-
-bool ToBooleanStub::Types::CanBeUndetectable() const {
- return Contains(ToBooleanStub::SPEC_OBJECT)
- || Contains(ToBooleanStub::STRING);
+ return Contains(ToBooleanStub::SPEC_OBJECT) ||
+ Contains(ToBooleanStub::STRING) || Contains(ToBooleanStub::SYMBOL) ||
+ Contains(ToBooleanStub::HEAP_NUMBER) ||
+ Contains(ToBooleanStub::SIMD_VALUE);
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index c06c6c1fe4..68d18c7a93 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -54,6 +54,7 @@ namespace internal {
V(StubFailureTrampoline) \
V(SubString) \
V(ToNumber) \
+ V(ToObject) \
V(VectorStoreICTrampoline) \
V(VectorKeyedStoreICTrampoline) \
V(VectorStoreIC) \
@@ -78,6 +79,7 @@ namespace internal {
V(InternalArrayNoArgumentConstructor) \
V(InternalArraySingleArgumentConstructor) \
V(KeyedLoadGeneric) \
+ V(LoadGlobalViaContext) \
V(LoadScriptContextField) \
V(LoadDictionaryElement) \
V(NameDictionaryLookup) \
@@ -85,6 +87,7 @@ namespace internal {
V(Typeof) \
V(RegExpConstructResult) \
V(StoreFastElement) \
+ V(StoreGlobalViaContext) \
V(StoreScriptContextField) \
V(StringAdd) \
V(ToBoolean) \
@@ -94,6 +97,7 @@ namespace internal {
/* TurboFanCodeStubs */ \
V(StringLengthTF) \
V(StringAddTF) \
+ /* TurboFanICs */ \
V(MathFloor) \
/* IC Handler stubs */ \
V(ArrayBufferViewLoadField) \
@@ -216,7 +220,9 @@ class CodeStub BASE_EMBEDDED {
virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() const = 0;
- virtual int GetStackParameterCount() const { return 0; }
+ virtual int GetStackParameterCount() const {
+ return GetCallInterfaceDescriptor().GetStackParameterCount();
+ }
virtual void InitializeDescriptor(CodeStubDescriptor* descriptor) {}
@@ -360,6 +366,26 @@ struct FakeStubForTesting : public CodeStub {
Handle<Code> GenerateCode() override; \
DEFINE_CODE_STUB(NAME, SUPER)
+#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER) \
+ public: \
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
+ return DESC##Descriptor(isolate()); \
+ }; \
+ DEFINE_CODE_STUB(NAME, SUPER)
+
+#define DEFINE_TURBOFAN_IC(NAME, SUPER, DESC) \
+ public: \
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
+ if (GetCallMode() == CALL_FROM_OPTIMIZED_CODE) { \
+ return DESC##CallFromOptimizedCodeDescriptor(isolate()); \
+ } else { \
+ return DESC##CallFromUnoptimizedCodeDescriptor(isolate()); \
+ } \
+ }; \
+ \
+ protected: \
+ DEFINE_CODE_STUB(NAME, SUPER)
+
#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
public: \
Handle<Code> GenerateCode() override; \
@@ -398,7 +424,6 @@ class PlatformCodeStub : public CodeStub {
enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
-enum HandlerArgumentsMode { DONT_PASS_ARGUMENTS, PASS_ARGUMENTS };
class CodeStubDescriptor {
@@ -413,8 +438,7 @@ class CodeStubDescriptor {
void Initialize(Register stack_parameter_count,
Address deoptimization_handler = NULL,
int hint_stack_parameter_count = -1,
- StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE,
- HandlerArgumentsMode handler_mode = DONT_PASS_ARGUMENTS);
+ StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE);
void SetMissHandler(ExternalReference handler) {
miss_handler_ = handler;
@@ -431,6 +455,14 @@ class CodeStubDescriptor {
return call_descriptor().GetRegisterParameterCount();
}
+ int GetStackParameterCount() const {
+ return call_descriptor().GetStackParameterCount();
+ }
+
+ int GetParameterCount() const {
+ return call_descriptor().GetParameterCount();
+ }
+
Register GetRegisterParameter(int index) const {
return call_descriptor().GetRegisterParameter(index);
}
@@ -449,8 +481,8 @@ class CodeStubDescriptor {
}
int GetHandlerParameterCount() const {
- int params = GetRegisterParameterCount();
- if (handler_arguments_mode_ == PASS_ARGUMENTS) {
+ int params = GetParameterCount();
+ if (PassesArgumentsToDeoptimizationHandler()) {
params += 1;
}
return params;
@@ -462,6 +494,10 @@ class CodeStubDescriptor {
Address deoptimization_handler() const { return deoptimization_handler_; }
private:
+ bool PassesArgumentsToDeoptimizationHandler() const {
+ return stack_parameter_count_.is_valid();
+ }
+
CallInterfaceDescriptor call_descriptor_;
Register stack_parameter_count_;
// If hint_stack_parameter_count_ > 0, the code stub can optimize the
@@ -470,7 +506,6 @@ class CodeStubDescriptor {
StubFunctionMode function_mode_;
Address deoptimization_handler_;
- HandlerArgumentsMode handler_arguments_mode_;
ExternalReference miss_handler_;
bool has_miss_handler_;
@@ -544,6 +579,33 @@ class TurboFanCodeStub : public CodeStub {
};
+class TurboFanIC : public TurboFanCodeStub {
+ public:
+ enum CallMode { CALL_FROM_UNOPTIMIZED_CODE, CALL_FROM_OPTIMIZED_CODE };
+
+ protected:
+ explicit TurboFanIC(Isolate* isolate, CallMode mode)
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = CallModeBits::encode(mode);
+ }
+
+ CallMode GetCallMode() const { return CallModeBits::decode(minor_key_); }
+
+ void set_sub_minor_key(uint32_t key) {
+ minor_key_ = SubMinorKeyBits::update(minor_key_, key);
+ }
+
+ uint32_t sub_minor_key() const { return SubMinorKeyBits::decode(minor_key_); }
+
+ static const int kSubMinorKeyBits = kStubMinorKeyBits - 1;
+
+ private:
+ class CallModeBits : public BitField<CallMode, 0, 1> {};
+ class SubMinorKeyBits : public BitField<int, 1, kSubMinorKeyBits> {};
+ DEFINE_CODE_STUB_BASE(TurboFanIC, TurboFanCodeStub);
+};
+
+
// Helper interface to prepare to/restore after making runtime calls.
class RuntimeCallHelper {
public:
@@ -610,13 +672,12 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
};
-class MathFloorStub : public TurboFanCodeStub {
+class MathFloorStub : public TurboFanIC {
public:
- explicit MathFloorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
- int GetStackParameterCount() const override { return 1; }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(MathRoundVariant);
- DEFINE_CODE_STUB(MathFloor, TurboFanCodeStub);
+ explicit MathFloorStub(Isolate* isolate, TurboFanIC::CallMode mode)
+ : TurboFanIC(isolate, mode) {}
+ Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
+ DEFINE_TURBOFAN_IC(MathFloor, TurboFanIC, MathRoundVariant);
};
@@ -964,11 +1025,6 @@ class CallICStub: public PlatformCodeStub {
minor_key_ = state.GetExtraICState();
}
- static int ExtractArgcFromMinorKey(int minor_key) {
- CallICState state(static_cast<ExtraICState>(minor_key));
- return state.arg_count();
- }
-
Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
InlineCacheState GetICState() const override { return DEFAULT; }
@@ -1341,6 +1397,53 @@ class StoreGlobalStub : public HandlerStub {
};
+class LoadGlobalViaContextStub final : public PlatformCodeStub {
+ public:
+ static const int kMaximumDepth = 15;
+
+ LoadGlobalViaContextStub(Isolate* isolate, int depth)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = DepthBits::encode(depth);
+ }
+
+ int depth() const { return DepthBits::decode(minor_key_); }
+
+ private:
+ class DepthBits : public BitField<int, 0, 4> {};
+ STATIC_ASSERT(DepthBits::kMax == kMaximumDepth);
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadGlobalViaContext);
+ DEFINE_PLATFORM_CODE_STUB(LoadGlobalViaContext, PlatformCodeStub);
+};
+
+
+class StoreGlobalViaContextStub final : public PlatformCodeStub {
+ public:
+ static const int kMaximumDepth = 15;
+
+ StoreGlobalViaContextStub(Isolate* isolate, int depth,
+ LanguageMode language_mode)
+ : PlatformCodeStub(isolate) {
+ minor_key_ =
+ DepthBits::encode(depth) | LanguageModeBits::encode(language_mode);
+ }
+
+ int depth() const { return DepthBits::decode(minor_key_); }
+ LanguageMode language_mode() const {
+ return LanguageModeBits::decode(minor_key_);
+ }
+
+ private:
+ class DepthBits : public BitField<int, 0, 4> {};
+ STATIC_ASSERT(DepthBits::kMax == kMaximumDepth);
+ class LanguageModeBits : public BitField<LanguageMode, 4, 2> {};
+ STATIC_ASSERT(LANGUAGE_END == 3);
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreGlobalViaContext);
+ DEFINE_PLATFORM_CODE_STUB(StoreGlobalViaContext, PlatformCodeStub);
+};
+
+
class CallApiFunctionStub : public PlatformCodeStub {
public:
explicit CallApiFunctionStub(Isolate* isolate, bool call_data_undefined)
@@ -1864,10 +1967,6 @@ class CallFunctionStub: public PlatformCodeStub {
minor_key_ = ArgcBits::encode(argc) | FlagBits::encode(flags);
}
- static int ExtractArgcFromMinorKey(int minor_key) {
- return ArgcBits::decode(minor_key);
- }
-
private:
int argc() const { return ArgcBits::decode(minor_key_); }
int flags() const { return FlagBits::decode(minor_key_); }
@@ -2753,6 +2852,7 @@ class ToBooleanStub: public HydrogenCodeStub {
STRING,
SYMBOL,
HEAP_NUMBER,
+ SIMD_VALUE,
NUMBER_OF_TYPES
};
@@ -2773,7 +2873,9 @@ class ToBooleanStub: public HydrogenCodeStub {
bool UpdateStatus(Handle<Object> object);
bool NeedsMap() const;
- bool CanBeUndetectable() const;
+ bool CanBeUndetectable() const {
+ return Contains(ToBooleanStub::SPEC_OBJECT);
+ }
bool IsGeneric() const { return ToIntegral() == Generic().ToIntegral(); }
static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); }
@@ -2849,35 +2951,13 @@ class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
return StoreModeBits::decode(sub_minor_key());
}
- // Parameters accessed via CodeStubGraphBuilder::GetParameter()
- enum ParameterIndices {
- kValueIndex,
- kMapIndex,
- kKeyIndex,
- kObjectIndex,
- kParameterCount
- };
-
- static const Register ValueRegister() {
- return ElementTransitionAndStoreDescriptor::ValueRegister();
- }
- static const Register MapRegister() {
- return ElementTransitionAndStoreDescriptor::MapRegister();
- }
- static const Register KeyRegister() {
- return ElementTransitionAndStoreDescriptor::NameRegister();
- }
- static const Register ObjectRegister() {
- return ElementTransitionAndStoreDescriptor::ReceiverRegister();
- }
-
private:
class FromBits : public BitField<ElementsKind, 0, 8> {};
class ToBits : public BitField<ElementsKind, 8, 8> {};
class IsJSArrayBits : public BitField<bool, 16, 1> {};
class StoreModeBits : public BitField<KeyedAccessStoreMode, 17, 4> {};
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ElementTransitionAndStore);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreTransition);
DEFINE_HYDROGEN_CODE_STUB(ElementsTransitionAndStore, HydrogenCodeStub);
};
@@ -2973,6 +3053,15 @@ class ToNumberStub final : public PlatformCodeStub {
};
+class ToObjectStub final : public HydrogenCodeStub {
+ public:
+ explicit ToObjectStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ToObject);
+ DEFINE_HYDROGEN_CODE_STUB(ToObject, HydrogenCodeStub);
+};
+
+
class StringCompareStub : public PlatformCodeStub {
public:
explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
diff --git a/deps/v8/src/code-stubs.js b/deps/v8/src/code-stubs.js
new file mode 100644
index 0000000000..ab06f6c63b
--- /dev/null
+++ b/deps/v8/src/code-stubs.js
@@ -0,0 +1,69 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, code_stubs) {
+
+"use strict";
+
+code_stubs.StringLengthTFStub = function StringLengthTFStub(call_conv, minor_key) {
+ var stub = function(receiver, name, i, v) {
+ // i and v are dummy parameters mandated by the InterfaceDescriptor,
+ // (LoadWithVectorDescriptor).
+ return %_StringGetLength(%_JSValueGetValue(receiver));
+ }
+ return stub;
+}
+
+code_stubs.StringAddTFStub = function StringAddTFStub(call_conv, minor_key) {
+ var stub = function(left, right) {
+ return %StringAdd(left, right);
+ }
+ return stub;
+}
+
+const kTurboFanICCallModeMask = 1;
+const kTurboFanICCallForUnptimizedCode = 0;
+const kTurboFanICCallForOptimizedCode = 1;
+
+code_stubs.MathFloorStub = function MathFloorStub(call_conv, minor_key) {
+ var call_from_optimized_ic = function(f, i, tv, receiver, v) {
+ "use strict";
+ // |f| is this function's JSFunction
+ // |i| is TypeFeedbackVector slot # of callee's CallIC for Math.floor call
+ // |receiver| is receiver, should not be used
+ // |tv| is the calling function's type vector
+ // |v| is the value to floor
+ if (f !== %_FixedArrayGet(tv, i|0)) {
+ return %_CallFunction(receiver, v, f);
+ }
+ var r = %_MathFloor(+v);
+ if (%_IsMinusZero(r)) {
+ // Collect type feedback when the result of the floor is -0. This is
+ // accomplished by storing a sentinel in the second, "extra"
+ // TypeFeedbackVector slot corresponding to the Math.floor CallIC call in
+ // the caller's TypeVector.
+ %_FixedArraySet(tv, ((i|0)+1)|0, 1);
+ return -0;
+ }
+ // Return integers in smi range as smis.
+ var trunc = r|0;
+ if (trunc === r) {
+ return trunc;
+ }
+ return r;
+ }
+ var call_mode = (minor_key & kTurboFanICCallModeMask);
+ if (call_mode == kTurboFanICCallForOptimizedCode) {
+ return call_from_optimized_ic;
+ } else {
+ %SetForceInlineFlag(call_from_optimized_ic);
+ var call_from_unoptimized_ic = function(f, i, receiver, v) {
+ var tv = %_GetTypeFeedbackVector(%_GetCallerJSFunction());
+ return call_from_optimized_ic(f, i, tv, receiver, v);
+ }
+ return call_from_unoptimized_ic;
+ }
+}
+
+})
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index f25ce4df22..c9de2b1af1 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/codegen.h"
#if defined(V8_OS_AIX)
#include <fenv.h>
#endif
#include "src/bootstrapper.h"
-#include "src/codegen.h"
#include "src/compiler.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/parser.h"
#include "src/prettyprinter.h"
#include "src/rewriter.h"
@@ -129,7 +128,7 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
PrintF("%s", name == NULL ? "<unknown>" : name);
} else {
AllowDeferredHandleDereference allow_deference_for_trace;
- PrintF("%s", info->function()->debug_name()->ToCString().get());
+ PrintF("%s", info->literal()->debug_name()->ToCString().get());
}
PrintF("]\n");
}
@@ -138,12 +137,12 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
if (info->parse_info() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
PrettyPrinter(info->isolate(), info->zone())
- .PrintProgram(info->function()));
+ .PrintProgram(info->literal()));
}
if (info->parse_info() && print_ast) {
PrintF("--- AST ---\n%s\n", AstPrinter(info->isolate(), info->zone())
- .PrintProgram(info->function()));
+ .PrintProgram(info->literal()));
}
#endif // DEBUG
}
@@ -183,13 +182,12 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
(info->IsOptimizing() && FLAG_print_opt_code));
if (print_code) {
const char* debug_name;
- SmartArrayPointer<char> debug_name_holder;
+ base::SmartArrayPointer<char> debug_name_holder;
if (info->IsStub()) {
CodeStub::Major major_key = info->code_stub()->MajorKey();
debug_name = CodeStub::MajorName(major_key, false);
} else {
- debug_name_holder =
- info->parse_info()->function()->debug_name()->ToCString();
+ debug_name_holder = info->literal()->debug_name()->ToCString();
debug_name = debug_name_holder.get();
}
@@ -197,21 +195,20 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
OFStream os(tracing_scope.file());
// Print the source code if available.
- FunctionLiteral* function = nullptr;
bool print_source =
info->parse_info() && (code->kind() == Code::OPTIMIZED_FUNCTION ||
code->kind() == Code::FUNCTION);
if (print_source) {
- function = info->function();
+ FunctionLiteral* literal = info->literal();
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
os << "--- Raw source ---\n";
StringCharacterStream stream(String::cast(script->source()),
- function->start_position());
+ literal->start_position());
// fun->end_position() points to the last character in the stream. We
// need to compensate by adding one to calculate the length.
int source_len =
- function->end_position() - function->start_position() + 1;
+ literal->end_position() - literal->start_position() + 1;
for (int i = 0; i < source_len; i++) {
if (stream.HasMore()) {
os << AsReversiblyEscapedUC16(stream.GetNext());
@@ -231,7 +228,8 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
os << "--- Code ---\n";
}
if (print_source) {
- os << "source_position = " << function->start_position() << "\n";
+ FunctionLiteral* literal = info->literal();
+ os << "source_position = " << literal->start_position() << "\n";
}
code->Disassemble(debug_name, os);
os << "--- End code ---\n";
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 79c2c33696..9025a9fca6 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -42,8 +42,6 @@
// CodeForDoWhileConditionPosition
// CodeForSourcePosition
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
#if V8_TARGET_ARCH_IA32
#include "src/ia32/codegen-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index ceab1642c5..6d43384e1c 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -4,14 +4,6 @@
var $getHash;
var $getExistingHash;
-var $mapSet;
-var $mapHas;
-var $mapDelete;
-var $setAdd;
-var $setHas;
-var $setDelete;
-var $mapFromArray;
-var $setFromArray;
(function(global, utils) {
"use strict";
@@ -268,7 +260,7 @@ function SetForEach(f, receiver) {
while (%SetIteratorNext(iterator, value_array)) {
if (stepping) %DebugPrepareStepInIfStepping(f);
key = value_array[0];
- var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
+ var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
%_CallFunction(new_receiver, key, key, this, f);
}
}
@@ -457,7 +449,7 @@ function MapForEach(f, receiver) {
var value_array = [UNDEFINED, UNDEFINED];
while (%MapIteratorNext(iterator, value_array)) {
if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
+ var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
%_CallFunction(new_receiver, value_array[1], value_array[0], this, f);
}
}
@@ -487,15 +479,8 @@ utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
// Expose to the global scope.
$getHash = GetHash;
$getExistingHash = GetExistingHash;
-$mapGet = MapGet;
-$mapSet = MapSet;
-$mapHas = MapHas;
-$mapDelete = MapDelete;
-$setAdd = SetAdd;
-$setHas = SetHas;
-$setDelete = SetDelete;
-
-$mapFromArray = function(array) {
+
+function MapFromArray(array) {
var map = new GlobalMap;
var length = array.length;
for (var i = 0; i < length; i += 2) {
@@ -506,7 +491,7 @@ $mapFromArray = function(array) {
return map;
};
-$setFromArray = function(array) {
+function SetFromArray(array) {
var set = new GlobalSet;
var length = array.length;
for (var i = 0; i < length; ++i) {
@@ -515,4 +500,19 @@ $setFromArray = function(array) {
return set;
};
+// -----------------------------------------------------------------------
+// Exports
+
+utils.ExportToRuntime(function(to) {
+ to.MapGet = MapGet;
+ to.MapSet = MapSet;
+ to.MapHas = MapHas;
+ to.MapDelete = MapDelete;
+ to.SetAdd = SetAdd;
+ to.SetHas = SetHas;
+ to.SetDelete = SetDelete;
+ to.MapFromArray = MapFromArray;
+ to.SetFromArray = SetFromArray;
+});
+
})
diff --git a/deps/v8/src/compilation-dependencies.cc b/deps/v8/src/compilation-dependencies.cc
index e20015ca2f..643b88ab0e 100644
--- a/deps/v8/src/compilation-dependencies.cc
+++ b/deps/v8/src/compilation-dependencies.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compilation-dependencies.h"
+
#include "src/factory.h"
#include "src/handles-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/zone.h"
-#include "src/compilation-dependencies.h"
-
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compilation-dependencies.h b/deps/v8/src/compilation-dependencies.h
index 1ed6e5d9ba..c14220880f 100644
--- a/deps/v8/src/compilation-dependencies.h
+++ b/deps/v8/src/compilation-dependencies.h
@@ -5,6 +5,9 @@
#ifndef V8_DEPENDENCIES_H_
#define V8_DEPENDENCIES_H_
+#include "src/handles.h"
+#include "src/objects.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 0f3ebe0e67..c39936cc25 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -12,13 +12,15 @@
#include "src/compilation-cache.h"
#include "src/compiler/pipeline.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
+#include "src/debug/liveedit.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/gdb-jit.h"
#include "src/hydrogen.h"
+#include "src/interpreter/interpreter.h"
#include "src/lithium.h"
-#include "src/liveedit.h"
+#include "src/log-inl.h"
#include "src/messages.h"
#include "src/parser.h"
#include "src/prettyprinter.h"
@@ -62,10 +64,10 @@ PARSE_INFO_GETTER(Handle<Script>, script)
PARSE_INFO_GETTER(bool, is_eval)
PARSE_INFO_GETTER(bool, is_native)
PARSE_INFO_GETTER(bool, is_module)
+PARSE_INFO_GETTER(FunctionLiteral*, literal)
PARSE_INFO_GETTER_WITH_DEFAULT(LanguageMode, language_mode, STRICT)
PARSE_INFO_GETTER_WITH_DEFAULT(Handle<JSFunction>, closure,
Handle<JSFunction>::null())
-PARSE_INFO_GETTER(FunctionLiteral*, function)
PARSE_INFO_GETTER_WITH_DEFAULT(Scope*, scope, nullptr)
PARSE_INFO_GETTER(Handle<Context>, context)
PARSE_INFO_GETTER(Handle<SharedFunctionInfo>, shared_info)
@@ -101,6 +103,21 @@ bool CompilationInfo::has_shared_info() const {
}
+bool CompilationInfo::has_context() const {
+ return parse_info_ && !parse_info_->context().is_null();
+}
+
+
+bool CompilationInfo::has_literal() const {
+ return parse_info_ && parse_info_->literal() != nullptr;
+}
+
+
+bool CompilationInfo::has_scope() const {
+ return parse_info_ && parse_info_->scope() != nullptr;
+}
+
+
CompilationInfo::CompilationInfo(ParseInfo* parse_info)
: CompilationInfo(parse_info, nullptr, BASE, parse_info->isolate(),
parse_info->zone()) {
@@ -112,7 +129,6 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info)
// with deoptimization support.
if (isolate_->serializer_enabled()) EnableDeoptimizationSupport();
- if (isolate_->debug()->is_active()) MarkAsDebug();
if (FLAG_context_specialization) MarkAsContextSpecializing();
if (FLAG_turbo_inlining) MarkAsInliningEnabled();
if (FLAG_turbo_source_positions) MarkAsSourcePositionsEnabled();
@@ -203,24 +219,28 @@ Code::Flags CompilationInfo::flags() const {
// profiler, so they trigger their own optimization when they're called
// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
bool CompilationInfo::ShouldSelfOptimize() {
- return FLAG_crankshaft && !function()->flags()->Contains(kDontSelfOptimize) &&
- !function()->dont_optimize() &&
- function()->scope()->AllowsLazyCompilation() &&
+ return FLAG_crankshaft &&
+ !(literal()->flags() & AstProperties::kDontSelfOptimize) &&
+ !literal()->dont_optimize() &&
+ literal()->scope()->AllowsLazyCompilation() &&
(!has_shared_info() || !shared_info()->optimization_disabled());
}
void CompilationInfo::EnsureFeedbackVector() {
- if (feedback_vector_.is_null() ||
- feedback_vector_->SpecDiffersFrom(function()->feedback_vector_spec())) {
+ if (feedback_vector_.is_null()) {
feedback_vector_ = isolate()->factory()->NewTypeFeedbackVector(
- function()->feedback_vector_spec());
+ literal()->feedback_vector_spec());
}
+
+ // It's very important that recompiles do not alter the structure of the
+ // type feedback vector.
+ CHECK(!feedback_vector_->SpecDiffersFrom(literal()->feedback_vector_spec()));
}
-bool CompilationInfo::is_simple_parameter_list() {
- return scope()->is_simple_parameter_list();
+bool CompilationInfo::has_simple_parameters() {
+ return scope()->has_simple_parameters();
}
@@ -337,11 +357,10 @@ class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
DCHECK(info()->IsOptimizing());
- DCHECK(!info()->IsCompilingForDebugging());
// Do not use Crankshaft/TurboFan if we need to be able to set break points.
- if (isolate()->debug()->has_break_points()) {
- return RetryOptimization(kDebuggerHasBreakPoints);
+ if (info()->shared_info()->HasDebugInfo()) {
+ return AbortOptimization(kFunctionBeingDebugged);
}
// Limit the number of times we try to optimize functions.
@@ -658,6 +677,18 @@ static bool CompileUnoptimizedCode(CompilationInfo* info) {
}
+static bool GenerateBytecode(CompilationInfo* info) {
+ DCHECK(AllowCompilation::IsAllowed(info->isolate()));
+ if (!Compiler::Analyze(info->parse_info()) ||
+ !interpreter::Interpreter::MakeBytecode(info)) {
+ Isolate* isolate = info->isolate();
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return false;
+ }
+ return true;
+}
+
+
MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
CompilationInfo* info) {
VMState<COMPILER> state(info->isolate());
@@ -666,16 +697,21 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
// Parse and update CompilationInfo with the results.
if (!Parser::ParseStatic(info->parse_info())) return MaybeHandle<Code>();
Handle<SharedFunctionInfo> shared = info->shared_info();
- FunctionLiteral* lit = info->function();
+ FunctionLiteral* lit = info->literal();
shared->set_language_mode(lit->language_mode());
SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
MaybeDisableOptimization(shared, lit->dont_optimize_reason());
- // Compile unoptimized code.
- if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
+ if (FLAG_ignition && info->closure()->PassesFilter(FLAG_ignition_filter)) {
+ // Compile bytecode for the interpreter.
+ if (!GenerateBytecode(info)) return MaybeHandle<Code>();
+ } else {
+ // Compile unoptimized code.
+ if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
- CHECK_EQ(Code::FUNCTION, info->code()->kind());
- RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+ CHECK_EQ(Code::FUNCTION, info->code()->kind());
+ RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+ }
// Update the shared function info with the scope info. Allocating the
// ScopeInfo object may cause a GC.
@@ -745,26 +781,27 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
static bool Renumber(ParseInfo* parse_info) {
if (!AstNumbering::Renumber(parse_info->isolate(), parse_info->zone(),
- parse_info->function())) {
+ parse_info->literal())) {
return false;
}
Handle<SharedFunctionInfo> shared_info = parse_info->shared_info();
if (!shared_info.is_null()) {
- FunctionLiteral* lit = parse_info->function();
+ FunctionLiteral* lit = parse_info->literal();
shared_info->set_ast_node_count(lit->ast_node_count());
MaybeDisableOptimization(shared_info, lit->dont_optimize_reason());
- shared_info->set_dont_crankshaft(lit->flags()->Contains(kDontCrankshaft));
+ shared_info->set_dont_crankshaft(lit->flags() &
+ AstProperties::kDontCrankshaft);
}
return true;
}
bool Compiler::Analyze(ParseInfo* info) {
- DCHECK(info->function() != NULL);
+ DCHECK_NOT_NULL(info->literal());
if (!Rewriter::Rewrite(info)) return false;
if (!Scope::Analyze(info)) return false;
if (!Renumber(info)) return false;
- DCHECK(info->scope() != NULL);
+ DCHECK_NOT_NULL(info->scope());
return true;
}
@@ -903,18 +940,6 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
}
-MaybeHandle<Code> Compiler::GetUnoptimizedCode(
- Handle<SharedFunctionInfo> shared) {
- DCHECK(!shared->GetIsolate()->has_pending_exception());
- DCHECK(!shared->is_compiled());
-
- Zone zone;
- ParseInfo parse_info(&zone, shared);
- CompilationInfo info(&parse_info);
- return GetUnoptimizedCodeCommon(&info);
-}
-
-
bool Compiler::EnsureCompiled(Handle<JSFunction> function,
ClearExceptionFlag flag) {
if (function->is_compiled()) return true;
@@ -935,8 +960,8 @@ bool Compiler::EnsureCompiled(Handle<JSFunction> function,
// TODO(turbofan): In the future, unoptimized code with deopt support could
// be generated lazily once deopt is triggered.
bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
- DCHECK(info->function() != NULL);
- DCHECK(info->scope() != NULL);
+ DCHECK_NOT_NULL(info->literal());
+ DCHECK(info->has_scope());
Handle<SharedFunctionInfo> shared = info->shared_info();
if (!shared->has_deoptimization_support()) {
// TODO(titzer): just reuse the ParseInfo for the unoptimized compile.
@@ -944,7 +969,7 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
// Note that we use the same AST that we will use for generating the
// optimized code.
ParseInfo* parse_info = unoptimized.parse_info();
- parse_info->set_literal(info->function());
+ parse_info->set_literal(info->literal());
parse_info->set_scope(info->scope());
parse_info->set_context(info->context());
unoptimized.EnableDeoptimizationSupport();
@@ -977,42 +1002,79 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
}
-// Compile full code for debugging. This code will have debug break slots
-// and deoptimization information. Deoptimization information is required
-// in case that an optimized version of this function is still activated on
-// the stack. It will also make sure that the full code is compiled with
-// the same flags as the previous version, that is flags which can change
-// the code generated. The current method of mapping from already compiled
-// full code without debug break slots to full code with debug break slots
-// depends on the generated code is otherwise exactly the same.
-// If compilation fails, just keep the existing code.
-MaybeHandle<Code> Compiler::GetDebugCode(Handle<JSFunction> function) {
- CompilationInfoWithZone info(function);
+bool CompileEvalForDebugging(Handle<JSFunction> function,
+ Handle<SharedFunctionInfo> shared) {
+ Handle<Script> script(Script::cast(shared->script()));
+ Handle<Context> context(function->context());
+
+ Zone zone;
+ ParseInfo parse_info(&zone, script);
+ CompilationInfo info(&parse_info);
Isolate* isolate = info.isolate();
- VMState<COMPILER> state(isolate);
+ parse_info.set_eval();
+ parse_info.set_context(context);
+ if (context->IsNativeContext()) parse_info.set_global();
+ parse_info.set_toplevel();
+ parse_info.set_allow_lazy_parsing(false);
+ parse_info.set_language_mode(shared->language_mode());
+ parse_info.set_parse_restriction(NO_PARSE_RESTRICTION);
info.MarkAsDebug();
- DCHECK(!isolate->has_pending_exception());
- Handle<Code> old_code(function->shared()->code());
- DCHECK(old_code->kind() == Code::FUNCTION);
- DCHECK(!old_code->has_debug_break_slots());
+ VMState<COMPILER> state(info.isolate());
- info.MarkCompilingForDebugging();
- if (old_code->is_compiled_optimizable()) {
- info.EnableDeoptimizationSupport();
- } else {
- info.MarkNonOptimizable();
+ if (!Parser::ParseStatic(&parse_info)) {
+ isolate->clear_pending_exception();
+ return false;
}
- MaybeHandle<Code> maybe_new_code = GetUnoptimizedCodeCommon(&info);
- Handle<Code> new_code;
- if (!maybe_new_code.ToHandle(&new_code)) {
+
+ FunctionLiteral* lit = parse_info.literal();
+ LiveEditFunctionTracker live_edit_tracker(isolate, lit);
+
+ if (!CompileUnoptimizedCode(&info)) {
isolate->clear_pending_exception();
+ return false;
+ }
+ shared->ReplaceCode(*info.code());
+ return true;
+}
+
+
+bool CompileForDebugging(CompilationInfo* info) {
+ info->MarkAsDebug();
+ if (GetUnoptimizedCodeCommon(info).is_null()) {
+ info->isolate()->clear_pending_exception();
+ return false;
+ }
+ return true;
+}
+
+
+static inline bool IsEvalToplevel(Handle<SharedFunctionInfo> shared) {
+ return shared->is_toplevel() && shared->script()->IsScript() &&
+ Script::cast(shared->script())->compilation_type() ==
+ Script::COMPILATION_TYPE_EVAL;
+}
+
+
+bool Compiler::CompileDebugCode(Handle<JSFunction> function) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (IsEvalToplevel(shared)) {
+ return CompileEvalForDebugging(function, shared);
} else {
- DCHECK_EQ(old_code->is_compiled_optimizable(),
- new_code->is_compiled_optimizable());
+ CompilationInfoWithZone info(function);
+ return CompileForDebugging(&info);
}
- return maybe_new_code;
+}
+
+
+bool Compiler::CompileDebugCode(Handle<SharedFunctionInfo> shared) {
+ DCHECK(shared->allows_lazy_compilation_without_context());
+ DCHECK(!IsEvalToplevel(shared));
+ Zone zone;
+ ParseInfo parse_info(&zone, shared);
+ CompilationInfo info(&parse_info);
+ return CompileForDebugging(&info);
}
@@ -1029,7 +1091,7 @@ void Compiler::CompileForLiveEdit(Handle<Script> script) {
info.parse_info()->set_global();
if (!Parser::ParseStatic(info.parse_info())) return;
- LiveEditFunctionTracker tracker(info.isolate(), info.function());
+ LiveEditFunctionTracker tracker(info.isolate(), parse_info.literal());
if (!CompileUnoptimizedCode(&info)) return;
if (info.has_shared_info()) {
Handle<ScopeInfo> scope_info =
@@ -1062,13 +1124,14 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
{ VMState<COMPILER> state(info->isolate());
if (parse_info->literal() == NULL) {
- // Parse the script if needed (if it's already parsed, function() is
- // non-NULL).
+ // Parse the script if needed (if it's already parsed, literal() is
+ // non-NULL). If compiling for debugging, we may eagerly compile inner
+ // functions, so do not parse lazily in that case.
ScriptCompiler::CompileOptions options = parse_info->compile_options();
bool parse_allow_lazy = (options == ScriptCompiler::kConsumeParserCache ||
String::cast(script->source())->length() >
FLAG_min_preparse_length) &&
- !Compiler::DebuggerWantsEagerCompilation(isolate);
+ !info->is_debug();
parse_info->set_allow_lazy_parsing(parse_allow_lazy);
if (!parse_allow_lazy &&
@@ -1086,9 +1149,11 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
}
}
+ DCHECK(!info->is_debug() || !parse_info->allow_lazy_parsing());
+
info->MarkAsFirstCompile();
- FunctionLiteral* lit = info->function();
+ FunctionLiteral* lit = parse_info->literal();
LiveEditFunctionTracker live_edit_tracker(isolate, lit);
// Measure how long it takes to do the compilation; only take the
@@ -1116,6 +1181,10 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
SharedFunctionInfo::InitFromFunctionLiteral(result, lit);
SharedFunctionInfo::SetScript(result, script);
result->set_is_toplevel(true);
+ if (info->is_eval()) {
+ // Eval scripts cannot be (re-)compiled without context.
+ result->set_allows_lazy_compilation_without_context(false);
+ }
Handle<String> script_name = script->name()->IsString()
? Handle<String>(String::cast(script->name()))
@@ -1139,8 +1208,6 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
}
- isolate->debug()->OnAfterCompile(script);
-
return result;
}
@@ -1148,7 +1215,8 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
- ParseRestriction restriction, int scope_position) {
+ ParseRestriction restriction, int line_offset, int column_offset,
+ Handle<Object> script_name, ScriptOriginOptions options) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
isolate->counters()->total_eval_size()->Increment(source_length);
@@ -1157,11 +1225,18 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
CompilationCache* compilation_cache = isolate->compilation_cache();
MaybeHandle<SharedFunctionInfo> maybe_shared_info =
compilation_cache->LookupEval(source, outer_info, context, language_mode,
- scope_position);
+ line_offset);
Handle<SharedFunctionInfo> shared_info;
+ Handle<Script> script;
if (!maybe_shared_info.ToHandle(&shared_info)) {
- Handle<Script> script = isolate->factory()->NewScript(source);
+ script = isolate->factory()->NewScript(source);
+ if (!script_name.is_null()) {
+ script->set_name(*script_name);
+ script->set_line_offset(Smi::FromInt(line_offset));
+ script->set_column_offset(Smi::FromInt(column_offset));
+ }
+ script->set_origin_options(options);
Zone zone;
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info);
@@ -1188,14 +1263,22 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
DCHECK(is_sloppy(language_mode) ||
is_strict(shared_info->language_mode()));
compilation_cache->PutEval(source, outer_info, context, shared_info,
- scope_position);
+ line_offset);
}
} else if (shared_info->ic_age() != isolate->heap()->global_ic_age()) {
shared_info->ResetForNewContext(isolate->heap()->global_ic_age());
}
- return isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared_info, context, NOT_TENURED);
+ Handle<JSFunction> result =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared_info, context, NOT_TENURED);
+
+ // OnAfterCompile has to be called after we create the JSFunction, which we
+ // may require to recompile the eval for debugging, if we find a function
+ // that contains break points in the eval script.
+ isolate->debug()->OnAfterCompile(script);
+
+ return result;
}
@@ -1317,7 +1400,11 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
}
}
- if (result.is_null()) isolate->ReportPendingMessages();
+ if (result.is_null()) {
+ isolate->ReportPendingMessages();
+ } else {
+ isolate->debug()->OnAfterCompile(script);
+ }
} else if (result->ic_age() != isolate->heap()->global_ic_age()) {
result->ResetForNewContext(isolate->heap()->global_ic_age());
}
@@ -1338,9 +1425,13 @@ Handle<SharedFunctionInfo> Compiler::CompileStreamedScript(
static_cast<LanguageMode>(parse_info->language_mode() | language_mode));
CompilationInfo compile_info(parse_info);
- // TODO(marja): FLAG_serialize_toplevel is not honoured and won't be; when the
- // real code caching lands, streaming needs to be adapted to use it.
- return CompileToplevel(&compile_info);
+
+ // The source was parsed lazily, so compiling for debugging is not possible.
+ DCHECK(!compile_info.is_debug());
+
+ Handle<SharedFunctionInfo> result = CompileToplevel(&compile_info);
+ if (!result.is_null()) isolate->debug()->OnAfterCompile(script);
+ return result;
}
@@ -1362,9 +1453,13 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
// We found an existing shared function info. If it's already compiled,
// don't worry about compiling it, and simply return it. If it's not yet
// compiled, continue to decide whether to eagerly compile.
+ // Carry on if we are compiling eagerly to obtain code for debugging,
+ // unless we already have code with debug break slots.
Handle<SharedFunctionInfo> existing;
if (maybe_existing.ToHandle(&existing) && existing->is_compiled()) {
- return existing;
+ if (!outer_info->is_debug() || existing->HasDebugCode()) {
+ return existing;
+ }
}
Zone zone;
@@ -1375,6 +1470,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
parse_info.set_language_mode(literal->scope()->language_mode());
if (outer_info->will_serialize()) info.PrepareForSerializing();
if (outer_info->is_first_compile()) info.MarkAsFirstCompile();
+ if (outer_info->is_debug()) info.MarkAsDebug();
LiveEditFunctionTracker live_edit_tracker(isolate, literal);
// Determine if the function can be lazily compiled. This is necessary to
@@ -1387,9 +1483,11 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
// of functions without an outer context when setting a breakpoint through
// Debug::FindSharedFunctionInfoInScript.
bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
- bool allow_lazy =
- literal->AllowsLazyCompilation() &&
- !DebuggerWantsEagerCompilation(isolate, allow_lazy_without_ctx);
+ // Compile eagerly for live edit. When compiling debug code, eagerly compile
+ // unless we can lazily compile without the context.
+ bool allow_lazy = literal->AllowsLazyCompilation() &&
+ !LiveEditFunctionTracker::IsActive(isolate) &&
+ (!info.is_debug() || allow_lazy_without_ctx);
if (outer_info->parse_info()->is_toplevel() && outer_info->will_serialize()) {
// Make sure that if the toplevel code (possibly to be serialized),
@@ -1443,11 +1541,6 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
// first time. It may have already been compiled previously.
result->set_never_compiled(outer_info->is_first_compile() && lazy);
- if (literal->scope()->new_target_var() != nullptr) {
- Handle<Code> stub(isolate->builtins()->JSConstructStubNewTarget());
- result->set_construct_stub(*stub);
- }
-
RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
result->set_allows_lazy_compilation(literal->AllowsLazyCompilation());
result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
@@ -1459,8 +1552,8 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
return result;
} else if (!lazy) {
- // We have additional data from compilation now.
- DCHECK(!existing->is_compiled());
+ // Assert that we are not overwriting (possibly patched) debug code.
+ DCHECK(!existing->HasDebugCode());
existing->ReplaceCode(*info.code());
existing->set_scope_info(*scope_info);
existing->set_feedback_vector(*info.feedback_vector());
@@ -1474,6 +1567,10 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
ConcurrencyMode mode,
BailoutId osr_ast_id,
JavaScriptFrame* osr_frame) {
+ Isolate* isolate = function->GetIsolate();
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ if (shared->HasDebugInfo()) return MaybeHandle<Code>();
+
Handle<Code> cached_code;
if (GetCodeFromOptimizedCodeMap(
function, osr_ast_id).ToHandle(&cached_code)) {
@@ -1488,10 +1585,8 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
return cached_code;
}
- Isolate* isolate = function->GetIsolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
if (!shared->is_compiled() ||
shared->scope_info() == ScopeInfo::Empty(isolate)) {
// The function was never compiled. Compile it unoptimized first.
@@ -1518,7 +1613,8 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
- SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(function));
+ base::SmartPointer<CompilationInfo> info(
+ new CompilationInfoWithZone(function));
VMState<COMPILER> state(isolate);
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
@@ -1543,7 +1639,7 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
// Take ownership of compilation info. Deleting compilation info
// also tears down the zone and the recompile job.
- SmartPointer<CompilationInfo> info(job->info());
+ base::SmartPointer<CompilationInfo> info(job->info());
Isolate* isolate = info->isolate();
VMState<COMPILER> state(isolate);
@@ -1552,19 +1648,18 @@ Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
Handle<SharedFunctionInfo> shared = info->shared_info();
shared->code()->set_profiler_ticks(0);
+ DCHECK(!shared->HasDebugInfo());
+
// 1) Optimization on the concurrent thread may have failed.
// 2) The function may have already been optimized by OSR. Simply continue.
// Except when OSR already disabled optimization for some reason.
// 3) The code may have already been invalidated due to dependency change.
- // 4) Debugger may have been activated.
- // 5) Code generation may have failed.
+ // 4) Code generation may have failed.
if (job->last_status() == OptimizedCompileJob::SUCCEEDED) {
if (shared->optimization_disabled()) {
job->RetryOptimization(kOptimizationDisabled);
} else if (info->dependencies()->HasAborted()) {
job->RetryOptimization(kBailedOutDueToDependencyChange);
- } else if (isolate->debug()->has_break_points()) {
- job->RetryOptimization(kDebuggerHasBreakPoints);
} else if (job->GenerateCode() == OptimizedCompileJob::SUCCEEDED) {
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info.get(), shared);
if (shared->SearchOptimizedCodeMap(info->context()->native_context(),
@@ -1590,15 +1685,6 @@ Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
}
-bool Compiler::DebuggerWantsEagerCompilation(Isolate* isolate,
- bool allow_lazy_without_ctx) {
- if (LiveEditFunctionTracker::IsActive(isolate)) return true;
- Debug* debug = isolate->debug();
- bool debugging = debug->is_active() || debug->has_break_points();
- return debugging && !allow_lazy_without_ctx;
-}
-
-
CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
: name_(name), info_(info) {
if (FLAG_hydrogen_stats) {
@@ -1633,7 +1719,7 @@ bool CompilationPhase::ShouldProduceTraceOutput() const {
#if DEBUG
void CompilationInfo::PrintAstForTesting() {
PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter(isolate(), zone()).PrintProgram(function()));
+ PrettyPrinter(isolate(), zone()).PrintProgram(literal()));
}
#endif
} // namespace internal
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 45863f6b28..4775111362 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -120,18 +120,17 @@ class CompilationInfo {
kMustNotHaveEagerFrame = 1 << 4,
kDeoptimizationSupport = 1 << 5,
kDebug = 1 << 6,
- kCompilingForDebugging = 1 << 7,
- kSerializing = 1 << 8,
- kContextSpecializing = 1 << 9,
- kFrameSpecializing = 1 << 10,
- kInliningEnabled = 1 << 11,
- kTypingEnabled = 1 << 12,
- kDisableFutureOptimization = 1 << 13,
- kSplittingEnabled = 1 << 14,
- kTypeFeedbackEnabled = 1 << 15,
- kDeoptimizationEnabled = 1 << 16,
- kSourcePositionsEnabled = 1 << 17,
- kFirstCompile = 1 << 18,
+ kSerializing = 1 << 7,
+ kContextSpecializing = 1 << 8,
+ kFrameSpecializing = 1 << 9,
+ kInliningEnabled = 1 << 10,
+ kTypingEnabled = 1 << 11,
+ kDisableFutureOptimization = 1 << 12,
+ kSplittingEnabled = 1 << 13,
+ kTypeFeedbackEnabled = 1 << 14,
+ kDeoptimizationEnabled = 1 << 15,
+ kSourcePositionsEnabled = 1 << 16,
+ kFirstCompile = 1 << 17,
};
explicit CompilationInfo(ParseInfo* parse_info);
@@ -149,12 +148,15 @@ class CompilationInfo {
bool is_module() const;
LanguageMode language_mode() const;
Handle<JSFunction> closure() const;
- FunctionLiteral* function() const;
+ FunctionLiteral* literal() const;
Scope* scope() const;
bool MayUseThis() const;
Handle<Context> context() const;
Handle<SharedFunctionInfo> shared_info() const;
bool has_shared_info() const;
+ bool has_context() const;
+ bool has_literal() const;
+ bool has_scope() const;
// -----------------------------------------------------------
Isolate* isolate() const {
@@ -172,7 +174,6 @@ class CompilationInfo {
bool is_this_defined() const;
int num_heap_slots() const;
Code::Flags flags() const;
- bool has_scope() const { return scope() != nullptr; }
void set_parameter_count(int parameter_count) {
DCHECK(IsStub());
@@ -207,7 +208,13 @@ class CompilationInfo {
return GetFlag(kMustNotHaveEagerFrame);
}
- void MarkAsDebug() { SetFlag(kDebug); }
+ // Compiles marked as debug produce unoptimized code with debug break slots.
+ // Inner functions that cannot be compiled w/o context are compiled eagerly.
+ // Always include deoptimization support to avoid having to recompile again.
+ void MarkAsDebug() {
+ SetFlag(kDebug);
+ SetFlag(kDeoptimizationSupport);
+ }
bool is_debug() const { return GetFlag(kDebug); }
@@ -270,12 +277,6 @@ class CompilationInfo {
}
void SetCode(Handle<Code> code) { code_ = code; }
- void MarkCompilingForDebugging() { SetFlag(kCompilingForDebugging); }
- bool IsCompilingForDebugging() { return GetFlag(kCompilingForDebugging); }
- void MarkNonOptimizable() {
- SetMode(CompilationInfo::NONOPT);
- }
-
bool ShouldTrapOnDeopt() const {
return (FLAG_trap_on_deopt && IsOptimizing()) ||
(FLAG_trap_on_stub_deopt && IsStub());
@@ -401,7 +402,7 @@ class CompilationInfo {
void PrintAstForTesting();
#endif
- bool is_simple_parameter_list();
+ bool has_simple_parameters();
Handle<Code> GenerateCodeStub();
@@ -426,12 +427,9 @@ class CompilationInfo {
// Compilation mode.
// BASE is generated by the full codegen, optionally prepared for bailouts.
// OPTIMIZE is optimized code generated by the Hydrogen-based backend.
- // NONOPT is generated by the full codegen and is not prepared for
- // recompilation/bailouts. These functions are never recompiled.
enum Mode {
BASE,
OPTIMIZE,
- NONOPT,
STUB
};
@@ -627,10 +625,9 @@ class Compiler : public AllStatic {
Handle<JSFunction> function);
MUST_USE_RESULT static MaybeHandle<Code> GetLazyCode(
Handle<JSFunction> function);
- MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCode(
- Handle<SharedFunctionInfo> shared);
- MUST_USE_RESULT static MaybeHandle<Code> GetDebugCode(
- Handle<JSFunction> function);
+
+ static bool CompileDebugCode(Handle<JSFunction> function);
+ static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
// Parser::Parse, then Compiler::Analyze.
static bool ParseAndAnalyze(ParseInfo* info);
@@ -648,7 +645,9 @@ class Compiler : public AllStatic {
MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
- ParseRestriction restriction, int scope_position);
+ ParseRestriction restriction, int line_offset, int column_offset = 0,
+ Handle<Object> script_name = Handle<Object>(),
+ ScriptOriginOptions options = ScriptOriginOptions());
// Compile a String source within a context.
static Handle<SharedFunctionInfo> CompileScript(
@@ -680,10 +679,6 @@ class Compiler : public AllStatic {
// Generate and return code from previously queued optimization job.
// On failure, return the empty handle.
static Handle<Code> GetConcurrentlyOptimizedCode(OptimizedCompileJob* job);
-
- // TODO(titzer): move this method out of the compiler.
- static bool DebuggerWantsEagerCompilation(
- Isolate* isolate, bool allow_lazy_without_ctx = false);
};
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index b2dd031b29..7f7a39bb9e 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -1 +1,6 @@
+set noparent
+
+bmeurer@chromium.org
+jarin@chromium.org
+mstarzinger@chromium.org
titzer@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 5046fef593..b54982f4c9 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -11,50 +11,58 @@ namespace compiler {
// static
FieldAccess AccessBuilder::ForMap() {
- return {kTaggedBase, HeapObject::kMapOffset, MaybeHandle<Name>(), Type::Any(),
- kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, HeapObject::kMapOffset,
+ MaybeHandle<Name>(), Type::Any(), kMachAnyTagged};
+ return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectProperties() {
- return {kTaggedBase, JSObject::kPropertiesOffset, MaybeHandle<Name>(),
- Type::Any(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
+ MaybeHandle<Name>(), Type::Any(), kMachAnyTagged};
+ return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
- return {kTaggedBase, JSObject::kElementsOffset, MaybeHandle<Name>(),
- Type::Internal(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
+ MaybeHandle<Name>(), Type::Internal(), kMachAnyTagged};
+ return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionContext() {
- return {kTaggedBase, JSFunction::kContextOffset, MaybeHandle<Name>(),
- Type::Internal(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, JSFunction::kContextOffset,
+ MaybeHandle<Name>(), Type::Internal(), kMachAnyTagged};
+ return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
- return {kTaggedBase, JSFunction::kSharedFunctionInfoOffset, Handle<Name>(),
- Type::Any(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, JSFunction::kSharedFunctionInfoOffset,
+ Handle<Name>(), Type::Any(), kMachAnyTagged};
+ return access;
}
// static
FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
- return {kTaggedBase, JSArrayBuffer::kBackingStoreOffset, MaybeHandle<Name>(),
- Type::UntaggedPointer(), kMachPtr};
+ FieldAccess access = {kTaggedBase, JSArrayBuffer::kBackingStoreOffset,
+ MaybeHandle<Name>(), Type::UntaggedPointer(), kMachPtr};
+ return access;
}
// static
FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
- return {kTaggedBase, JSDate::kValueOffset + index * kPointerSize,
- MaybeHandle<Name>(), Type::Number(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase,
+ JSDate::kValueOffset + index * kPointerSize,
+ MaybeHandle<Name>(), Type::Number(), kMachAnyTagged};
+ return access;
}
@@ -64,65 +72,71 @@ FieldAccess AccessBuilder::ForFixedArrayLength() {
// field, although it's not the best. If we had a Zone we could create an
// appropriate range type instead.
STATIC_ASSERT(FixedArray::kMaxLength <= 1 << 30);
- return {kTaggedBase, FixedArray::kLengthOffset, MaybeHandle<Name>(),
- Type::Intersect(Type::Unsigned30(), Type::TaggedSigned()),
- kMachAnyTagged};
-}
-
-
-// static
-FieldAccess AccessBuilder::ForExternalArrayPointer() {
- return {kTaggedBase, ExternalArray::kExternalPointerOffset,
- MaybeHandle<Name>(), Type::UntaggedPointer(), kMachPtr};
+ FieldAccess access = {
+ kTaggedBase, FixedArray::kLengthOffset, MaybeHandle<Name>(),
+ Type::Intersect(Type::Unsigned30(), Type::TaggedSigned()),
+ kMachAnyTagged};
+ return access;
}
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
- return {kTaggedBase, DescriptorArray::kEnumCacheOffset, Handle<Name>(),
- Type::TaggedPointer(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, DescriptorArray::kEnumCacheOffset,
+ Handle<Name>(), Type::TaggedPointer(), kMachAnyTagged};
+ return access;
}
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
- return {kTaggedBase, DescriptorArray::kEnumCacheBridgeCacheOffset,
- Handle<Name>(), Type::TaggedPointer(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase,
+ DescriptorArray::kEnumCacheBridgeCacheOffset,
+ Handle<Name>(), Type::TaggedPointer(), kMachAnyTagged};
+ return access;
}
// static
FieldAccess AccessBuilder::ForMapBitField3() {
- return {kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
- Type::UntaggedUnsigned32(), kMachUint32};
+ FieldAccess access = {kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
+ Type::UntaggedUnsigned32(), kMachUint32};
+ return access;
}
// static
FieldAccess AccessBuilder::ForMapDescriptors() {
- return {kTaggedBase, Map::kDescriptorsOffset, Handle<Name>(),
- Type::TaggedPointer(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, Map::kDescriptorsOffset, Handle<Name>(),
+ Type::TaggedPointer(), kMachAnyTagged};
+ return access;
}
// static
FieldAccess AccessBuilder::ForMapInstanceType() {
- return {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
- Type::UntaggedUnsigned8(), kMachUint8};
+ FieldAccess access = {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
+ Type::UntaggedUnsigned8(), kMachUint8};
+ return access;
}
// static
FieldAccess AccessBuilder::ForStringLength(Zone* zone) {
- return {kTaggedBase, String::kLengthOffset, Handle<Name>(),
- Type::Range(0, String::kMaxLength, zone), kMachAnyTagged};
+ FieldAccess access = {
+ kTaggedBase, String::kLengthOffset, Handle<Name>(),
+ Type::Intersect(Type::Range(0, String::kMaxLength, zone),
+ Type::TaggedSigned(), zone),
+ kMachAnyTagged};
+ return access;
}
// static
FieldAccess AccessBuilder::ForValue() {
- return {kTaggedBase, JSValue::kValueOffset, Handle<Name>(), Type::Any(),
- kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, JSValue::kValueOffset, Handle<Name>(),
+ Type::Any(), kMachAnyTagged};
+ return access;
}
@@ -131,27 +145,33 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
DCHECK_EQ(offset,
Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
- return {kTaggedBase, offset, Handle<Name>(), Type::Any(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
+ kMachAnyTagged};
+ return access;
}
// static
FieldAccess AccessBuilder::ForPropertyCellValue() {
- return {kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(), Type::Any(),
- kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(),
+ Type::Any(), kMachAnyTagged};
+ return access;
}
// static
FieldAccess AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector() {
- return {kTaggedBase, SharedFunctionInfo::kFeedbackVectorOffset,
- Handle<Name>(), Type::Any(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, SharedFunctionInfo::kFeedbackVectorOffset,
+ Handle<Name>(), Type::Any(), kMachAnyTagged};
+ return access;
}
// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
- return {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged};
+ ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
+ kMachAnyTagged};
+ return access;
}
@@ -161,61 +181,95 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
BaseTaggedness taggedness = is_external ? kUntaggedBase : kTaggedBase;
int header_size = is_external ? 0 : FixedTypedArrayBase::kDataOffset;
switch (type) {
- case kExternalInt8Array:
- return {taggedness, header_size, Type::Signed32(), kMachInt8};
+ case kExternalInt8Array: {
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ kMachInt8};
+ return access;
+ }
case kExternalUint8Array:
- case kExternalUint8ClampedArray:
- return {taggedness, header_size, Type::Unsigned32(), kMachUint8};
- case kExternalInt16Array:
- return {taggedness, header_size, Type::Signed32(), kMachInt16};
- case kExternalUint16Array:
- return {taggedness, header_size, Type::Unsigned32(), kMachUint16};
- case kExternalInt32Array:
- return {taggedness, header_size, Type::Signed32(), kMachInt32};
- case kExternalUint32Array:
- return {taggedness, header_size, Type::Unsigned32(), kMachUint32};
- case kExternalFloat32Array:
- return {taggedness, header_size, Type::Number(), kMachFloat32};
- case kExternalFloat64Array:
- return {taggedness, header_size, Type::Number(), kMachFloat64};
+ case kExternalUint8ClampedArray: {
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ kMachUint8};
+ return access;
+ }
+ case kExternalInt16Array: {
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ kMachInt16};
+ return access;
+ }
+ case kExternalUint16Array: {
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ kMachUint16};
+ return access;
+ }
+ case kExternalInt32Array: {
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ kMachInt32};
+ return access;
+ }
+ case kExternalUint32Array: {
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ kMachUint32};
+ return access;
+ }
+ case kExternalFloat32Array: {
+ ElementAccess access = {taggedness, header_size, Type::Number(),
+ kMachFloat32};
+ return access;
+ }
+ case kExternalFloat64Array: {
+ ElementAccess access = {taggedness, header_size, Type::Number(),
+ kMachFloat64};
+ return access;
+ }
}
UNREACHABLE();
- return {kUntaggedBase, 0, Type::None(), kMachNone};
+ ElementAccess access = {kUntaggedBase, 0, Type::None(), kMachNone};
+ return access;
}
// static
ElementAccess AccessBuilder::ForSeqStringChar(String::Encoding encoding) {
switch (encoding) {
- case String::ONE_BYTE_ENCODING:
- return {kTaggedBase, SeqString::kHeaderSize, Type::Unsigned32(),
- kMachUint8};
- case String::TWO_BYTE_ENCODING:
- return {kTaggedBase, SeqString::kHeaderSize, Type::Unsigned32(),
- kMachUint16};
+ case String::ONE_BYTE_ENCODING: {
+ ElementAccess access = {kTaggedBase, SeqString::kHeaderSize,
+ Type::Unsigned32(), kMachUint8};
+ return access;
+ }
+ case String::TWO_BYTE_ENCODING: {
+ ElementAccess access = {kTaggedBase, SeqString::kHeaderSize,
+ Type::Unsigned32(), kMachUint16};
+ return access;
+ }
}
UNREACHABLE();
- return {kUntaggedBase, 0, Type::None(), kMachNone};
+ ElementAccess access = {kUntaggedBase, 0, Type::None(), kMachNone};
+ return access;
}
// static
FieldAccess AccessBuilder::ForStatsCounter() {
- return {kUntaggedBase, 0, MaybeHandle<Name>(), Type::Signed32(), kMachInt32};
+ FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(), Type::Signed32(),
+ kMachInt32};
+ return access;
}
// static
FieldAccess AccessBuilder::ForFrameCallerFramePtr() {
- return {kUntaggedBase, StandardFrameConstants::kCallerFPOffset,
- MaybeHandle<Name>(), Type::Internal(), kMachPtr};
+ FieldAccess access = {kUntaggedBase, StandardFrameConstants::kCallerFPOffset,
+ MaybeHandle<Name>(), Type::Internal(), kMachPtr};
+ return access;
}
// static
FieldAccess AccessBuilder::ForFrameMarker() {
- return {kUntaggedBase, StandardFrameConstants::kMarkerOffset,
- MaybeHandle<Name>(), Type::Tagged(), kMachAnyTagged};
+ FieldAccess access = {kUntaggedBase, StandardFrameConstants::kMarkerOffset,
+ MaybeHandle<Name>(), Type::Tagged(), kMachAnyTagged};
+ return access;
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 240ffdcb5d..95be3e0dd8 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -43,9 +43,6 @@ class AccessBuilder final : public AllStatic {
// Provides access to FixedArray::length() field.
static FieldAccess ForFixedArrayLength();
- // Provides access to ExternalArray::external_pointer() field.
- static FieldAccess ForExternalArrayPointer();
-
// Provides access to DescriptorArray::enum_cache() field.
static FieldAccess ForDescriptorArrayEnumCache();
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 0c97f846f0..dca6d4e3ec 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -8,6 +8,7 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
#include "src/scopes.h"
namespace v8 {
@@ -147,12 +148,9 @@ class ArmOperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK(op != NULL);
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(
- AllocatedOperand::cast(op)->index(), frame(), 0);
+ FrameOffset offset =
+ linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -817,7 +815,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArmPush:
- __ Push(i.InputRegister(0));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ vstr(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ sub(sp, sp, Operand(kDoubleSize));
+ } else {
+ __ Push(i.InputRegister(0));
+ }
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmPoke: {
@@ -947,53 +950,25 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
- bool saved_pp;
if (FLAG_enable_embedded_constant_pool) {
__ Push(lr, fp, pp);
// Adjust FP to point to saved FP.
__ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
- saved_pp = true;
} else {
__ Push(lr, fp);
__ mov(fp, sp);
- saved_pp = false;
- }
- int register_save_area_size = saved_pp ? kPointerSize : 0;
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0 || saved_pp) {
- // Save callee-saved registers.
- __ stm(db_w, sp, saves);
- register_save_area_size +=
- kPointerSize * base::bits::CountPopulation32(saves);
- }
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) {
- // Save callee-saved FP registers.
- STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
- uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
- uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
- DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
-
- __ vstm(db_w, sp, DwVfpRegister::from_code(first),
- DwVfpRegister::from_code(last));
- register_save_area_size += 2 * kPointerSize * (last - first + 1);
- }
- if (register_save_area_size > 0) {
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
}
} else if (descriptor->IsJSFunctionCall()) {
CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
} else if (needs_frame_) {
__ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(0);
}
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1006,62 +981,75 @@ void CodeGenerator::AssemblePrologue() {
osr_pc_offset_ = __ pc_offset();
// TODO(titzer): cannot address target function == local #-1
__ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
- stack_slots -= frame()->GetOsrStackSlotCount();
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
- if (stack_slots > 0) {
- __ sub(sp, sp, Operand(stack_slots * kPointerSize));
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+ }
+ if (stack_shrink_slots > 0) {
+ __ sub(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ }
+
+ if (saves_fp != 0) {
+ // Save callee-saved FP registers.
+ STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
+ uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
+ uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
+ DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
+ __ vstm(db_w, sp, DwVfpRegister::from_code(first),
+ DwVfpRegister::from_code(last));
+ frame()->AllocateSavedCalleeRegisterSlots((last - first + 1) *
+ (kDoubleSize / kPointerSize));
+ }
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
+ : descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // Save callee-saved registers.
+ __ stm(db_w, sp, saves);
+ frame()->AllocateSavedCalleeRegisterSlots(
+ base::bits::CountPopulation32(saves));
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+
+ // Restore registers.
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
+ : descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ ldm(ia_w, sp, saves);
+ }
+
+ // Restore FP registers.
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
+ uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
+ uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
+ __ vldm(ia_w, sp, DwVfpRegister::from_code(first),
+ DwVfpRegister::from_code(last));
+ }
+
if (descriptor->kind() == CallDescriptor::kCallAddress) {
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- if (stack_slots > 0) {
- __ add(sp, sp, Operand(stack_slots * kPointerSize));
- }
- // Restore FP registers.
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) {
- STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
- uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
- uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
- __ vldm(ia_w, sp, DwVfpRegister::from_code(first),
- DwVfpRegister::from_code(last));
- }
- // Restore registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) {
- __ ldm(ia_w, sp, saves);
- }
- }
__ LeaveFrame(StackFrame::MANUAL);
- __ Ret();
} else if (descriptor->IsJSFunctionCall() || needs_frame_) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ b(&return_label_);
+ return;
} else {
__ bind(&return_label_);
__ LeaveFrame(StackFrame::MANUAL);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : (info()->IsStub()
- ? info()->code_stub()->GetStackParameterCount()
- : 0);
- if (pop_count != 0) {
- __ Drop(pop_count);
- }
- __ Ret();
}
- } else {
- __ Ret();
}
+ __ Ret(pop_count);
}
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 8855388048..aa59f2cbb7 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -1117,16 +1117,18 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* node = buffer.pushed_nodes[n]) {
- int const slot = static_cast<int>(n);
- InstructionOperand value = g.UseRegister(node);
- Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(), value);
+ if (Node* input = buffer.pushed_nodes[n]) {
+ int slot = static_cast<int>(n);
+ Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
+ g.UseRegister(input));
}
}
} else {
// Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kArmPush, g.NoOutput(), g.UseRegister(node));
+ for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ // Skip any alignment holes in pushed nodes.
+ if (input == nullptr) continue;
+ Emit(kArmPush, g.NoOutput(), g.UseRegister(input));
}
}
@@ -1220,8 +1222,8 @@ void InstructionSelector::VisitTailCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, false);
// Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kArmPush, g.NoOutput(), g.UseRegister(node));
+ for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ Emit(kArmPush, g.NoOutput(), g.UseRegister(input));
}
// Select the appropriate opcode based on the call type.
diff --git a/deps/v8/src/compiler/arm/linkage-arm.cc b/deps/v8/src/compiler/arm/linkage-arm.cc
deleted file mode 100644
index a923f1bf8d..0000000000
--- a/deps/v8/src/compiler/arm/linkage-arm.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct ArmLinkageHelperTraits {
- static Register ReturnValueReg() { return r0; }
- static Register ReturnValue2Reg() { return r1; }
- static Register JSCallFunctionReg() { return r1; }
- static Register ContextReg() { return cp; }
- static Register RuntimeCallFunctionReg() { return r1; }
- static Register RuntimeCallArgCountReg() { return r0; }
- static RegList CCalleeSaveRegisters() {
- return r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() |
- r10.bit();
- }
- static RegList CCalleeSaveFPRegisters() {
- return (1 << d8.code()) | (1 << d9.code()) | (1 << d10.code()) |
- (1 << d11.code()) | (1 << d12.code()) | (1 << d13.code()) |
- (1 << d14.code()) | (1 << d15.code());
- }
- static Register CRegisterParameter(int i) {
- static Register register_parameters[] = {r0, r1, r2, r3};
- return register_parameters[i];
- }
- static int CRegisterParametersLength() { return 4; }
- static int CStackBackingStoreLength() { return 0; }
-};
-
-
-typedef LinkageHelper<ArmLinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
- int parameter_count,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Zone* zone, Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties, MachineType return_type) {
- return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties,
- return_type);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- const MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index c3e9af6a29..1b68577772 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -4,10 +4,12 @@
#include "src/compiler/code-generator.h"
+#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
#include "src/scopes.h"
namespace v8 {
@@ -184,12 +186,9 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
DCHECK(op != NULL);
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(
- AllocatedOperand::cast(op)->index(), frame(), 0);
+ FrameOffset offset =
+ linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
offset.offset());
}
@@ -1085,41 +1084,22 @@ static int AlignedStackSlots(int stack_slots) {
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ SetStackPointer(csp);
__ Push(lr, fp);
__ Mov(fp, csp);
-
- // Save FP registers.
- CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
- descriptor->CalleeSavedFPRegisters());
- DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
- int saved_count = saves_fp.Count();
- __ PushCPURegList(saves_fp);
- // Save registers.
- CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
- descriptor->CalleeSavedRegisters());
- // TODO(palfia): TF save list is not in sync with
- // CPURegList::GetCalleeSaved(): x30 is missing.
- // DCHECK(saves.list() == CPURegList::GetCalleeSaved().list());
- saved_count += saves.Count();
- __ PushCPURegList(saves);
-
- frame()->SetRegisterSaveAreaSize(saved_count * kPointerSize);
} else if (descriptor->IsJSFunctionCall()) {
CompilationInfo* info = this->info();
__ SetStackPointer(jssp);
__ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
} else if (needs_frame_) {
__ SetStackPointer(jssp);
__ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(0);
}
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1132,65 +1112,75 @@ void CodeGenerator::AssemblePrologue() {
osr_pc_offset_ = __ pc_offset();
// TODO(titzer): cannot address target function == local #-1
__ ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
- stack_slots -= frame()->GetOsrStackSlotCount();
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
- if (stack_slots > 0) {
+ if (stack_shrink_slots > 0) {
Register sp = __ StackPointer();
if (!sp.Is(csp)) {
- __ Sub(sp, sp, stack_slots * kPointerSize);
+ __ Sub(sp, sp, stack_shrink_slots * kPointerSize);
}
- __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
+ __ Sub(csp, csp, AlignedStackSlots(stack_shrink_slots) * kPointerSize);
+ }
+
+ // Save FP registers.
+ CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ descriptor->CalleeSavedFPRegisters());
+ int saved_count = saves_fp.Count();
+ if (saved_count != 0) {
+ DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
+ __ PushCPURegList(saves_fp);
+ frame()->AllocateSavedCalleeRegisterSlots(saved_count *
+ (kDoubleSize / kPointerSize));
+ }
+ // Save registers.
+ // TODO(palfia): TF save list is not in sync with
+ // CPURegList::GetCalleeSaved(): x30 is missing.
+ // DCHECK(saves.list() == CPURegList::GetCalleeSaved().list());
+ CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
+ descriptor->CalleeSavedRegisters());
+ saved_count = saves.Count();
+ if (saved_count != 0) {
+ __ PushCPURegList(saves);
+ frame()->AllocateSavedCalleeRegisterSlots(saved_count);
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- if (stack_slots > 0) {
- __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
- }
- // Restore registers.
- CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
- descriptor->CalleeSavedRegisters());
- __ PopCPURegList(saves);
+ // Restore registers.
+ CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
+ descriptor->CalleeSavedRegisters());
+ if (saves.Count() != 0) {
+ __ PopCPURegList(saves);
+ }
- CPURegList saves_fp =
- CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
- descriptor->CalleeSavedFPRegisters());
- __ PopCPURegList(saves_fp);
- }
+ // Restore fp registers.
+ CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ descriptor->CalleeSavedFPRegisters());
+ if (saves_fp.Count() != 0) {
+ __ PopCPURegList(saves_fp);
+ }
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ Mov(csp, fp);
__ Pop(fp, lr);
- __ Ret();
} else if (descriptor->IsJSFunctionCall() || needs_frame_) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ B(&return_label_);
+ return;
} else {
__ Bind(&return_label_);
__ Mov(jssp, fp);
__ Pop(fp, lr);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : (info()->IsStub()
- ? info()->code_stub()->GetStackParameterCount()
- : 0);
- if (pop_count != 0) {
- __ Drop(pop_count);
- }
- __ Ret();
}
- } else {
- __ Ret();
}
+ __ Drop(pop_count);
+ __ Ret();
}
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index ca0ec4e400..76b1059eb5 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -1876,6 +1876,14 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
case IrOpcode::kWord32And:
return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
kLogical32Imm);
+ case IrOpcode::kWord32Equal: {
+ // Word32Equal(Word32Equal(x, y), 0) => Word32Compare(x, y, ne).
+ Int32BinopMatcher mequal(value);
+ node->ReplaceInput(0, mequal.left().node());
+ node->ReplaceInput(1, mequal.right().node());
+ cont.Negate();
+ return VisitWord32Compare(this, node, &cont);
+ }
default:
break;
}
diff --git a/deps/v8/src/compiler/arm64/linkage-arm64.cc b/deps/v8/src/compiler/arm64/linkage-arm64.cc
deleted file mode 100644
index afedefbbd1..0000000000
--- a/deps/v8/src/compiler/arm64/linkage-arm64.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct Arm64LinkageHelperTraits {
- static Register ReturnValueReg() { return x0; }
- static Register ReturnValue2Reg() { return x1; }
- static Register JSCallFunctionReg() { return x1; }
- static Register ContextReg() { return cp; }
- static Register RuntimeCallFunctionReg() { return x1; }
- static Register RuntimeCallArgCountReg() { return x0; }
- static RegList CCalleeSaveRegisters() {
- return (1 << x19.code()) | (1 << x20.code()) | (1 << x21.code()) |
- (1 << x22.code()) | (1 << x23.code()) | (1 << x24.code()) |
- (1 << x25.code()) | (1 << x26.code()) | (1 << x27.code()) |
- (1 << x28.code()) | (1 << x29.code()) | (1 << x30.code());
- }
- static RegList CCalleeSaveFPRegisters() {
- return (1 << d8.code()) | (1 << d9.code()) | (1 << d10.code()) |
- (1 << d11.code()) | (1 << d12.code()) | (1 << d13.code()) |
- (1 << d14.code()) | (1 << d15.code());
- }
- static Register CRegisterParameter(int i) {
- static Register register_parameters[] = {x0, x1, x2, x3, x4, x5, x6, x7};
- return register_parameters[i];
- }
- static int CRegisterParametersLength() { return 8; }
- static int CStackBackingStoreLength() { return 0; }
-};
-
-
-typedef LinkageHelper<Arm64LinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
- int parameter_count,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Zone* zone, Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties, MachineType return_type) {
- return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties,
- return_type);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- const MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index 341aedc099..e772579923 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -15,7 +15,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/state-values-utils.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/parser.h"
#include "src/scopes.h"
@@ -449,24 +449,24 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
local_zone),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
- FrameStateType::kJavaScriptFunction,
- info->num_parameters_including_this(),
- info->scope()->num_stack_slots(), info->shared_info())),
+ FrameStateType::kJavaScriptFunction, info->num_parameters() + 1,
+ info->scope()->num_stack_slots(), info->shared_info(),
+ CALL_MAINTAINS_NATIVE_CONTEXT)),
js_type_feedback_(js_type_feedback) {
InitializeAstVisitor(info->isolate(), local_zone);
}
Node* AstGraphBuilder::GetFunctionClosureForContext() {
- Scope* declaration_scope = current_scope()->DeclarationScope();
- if (declaration_scope->is_script_scope() ||
- declaration_scope->is_module_scope()) {
+ Scope* closure_scope = current_scope()->ClosureScope();
+ if (closure_scope->is_script_scope() ||
+ closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function as
// their closure, not the anonymous closure containing the global code.
// Pass a SMI sentinel and let the runtime look up the empty function.
return jsgraph()->SmiConstant(0);
} else {
- DCHECK(declaration_scope->is_function_scope());
+ DCHECK(closure_scope->is_function_scope());
return GetFunctionClosure();
}
}
@@ -520,6 +520,13 @@ bool AstGraphBuilder::CreateGraph(bool stack_check) {
// Initialize control scope.
ControlScope control(this);
+ // TODO(mstarzinger): For now we cannot assume that the {this} parameter is
+ // not {the_hole}, because for derived classes {this} has a TDZ and the
+ // JSConstructStubForDerived magically passes {the_hole} as a receiver.
+ if (scope->has_this_declaration() && scope->receiver()->is_const_mode()) {
+ env.RawParameterBind(0, jsgraph()->TheHoleConstant());
+ }
+
// Build receiver check for sloppy mode if necessary.
// TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
if (scope->has_this_declaration()) {
@@ -584,11 +591,6 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
return;
}
- // Visit implicit declaration of the function name.
- if (scope->is_function_scope() && scope->function() != NULL) {
- VisitVariableDeclaration(scope->function());
- }
-
// Visit declarations within the function scope.
VisitDeclarations(scope->declarations());
@@ -599,7 +601,7 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
}
// Visit statements in the function body.
- VisitStatements(info()->function()->body());
+ VisitStatements(info()->literal()->body());
// Emit tracing call if requested to do so.
if (FLAG_trace) {
@@ -1505,7 +1507,8 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- Node* node = NewNode(javascript()->CallRuntime(Runtime::kDebugBreak, 0));
+ Node* node =
+ NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement, 0));
PrepareFrameState(node, stmt->DebugBreakId());
environment()->MarkAllLocalsLive();
}
@@ -1559,11 +1562,13 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
Node* constructor = environment()->Pop();
Node* extends = environment()->Pop();
Node* name = environment()->Pop();
- Node* script = jsgraph()->Constant(info()->script());
Node* start = jsgraph()->Constant(expr->start_position());
Node* end = jsgraph()->Constant(expr->end_position());
- const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass, 6);
- Node* literal = NewNode(opc, name, extends, constructor, script, start, end);
+ const Operator* opc = javascript()->CallRuntime(
+ is_strong(language_mode()) ? Runtime::kDefineClassStrong
+ : Runtime::kDefineClass,
+ 5);
+ Node* literal = NewNode(opc, name, extends, constructor, start, end);
PrepareFrameState(literal, expr->CreateLiteralId(),
OutputFrameStateCombine::Push());
@@ -1633,23 +1638,26 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
}
}
- // Transform both the class literal and the prototype to fast properties.
- const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties, 1);
- NewNode(op, environment()->Pop()); // prototype
- NewNode(op, environment()->Pop()); // literal
+ // Set both the prototype and constructor to have fast properties, and also
+ // freeze them in strong mode.
+ environment()->Pop(); // proto
+ environment()->Pop(); // literal
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ literal = NewNode(op, literal, proto);
// Assign to class variable.
if (expr->scope() != NULL) {
DCHECK_NOT_NULL(expr->class_variable_proxy());
Variable* var = expr->class_variable_proxy()->var();
FrameStateBeforeAndAfter states(this, BailoutId::None());
- VectorSlotPair feedback = CreateVectorSlotPair(
- FLAG_vector_stores ? expr->GetNthSlot(store_slot_index++)
- : FeedbackVectorICSlot::Invalid());
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(FLAG_vector_stores && var->IsUnallocated()
+ ? expr->GetNthSlot(store_slot_index++)
+ : FeedbackVectorICSlot::Invalid());
BuildVariableAssignment(var, literal, Token::INIT_CONST, feedback,
BailoutId::None(), states);
}
-
ast_context()->ProduceValue(literal);
}
@@ -1953,6 +1961,9 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
FrameStateBeforeAndAfter states(this, subexpr->id());
Node* value = environment()->Pop();
Node* index = jsgraph()->Constant(array_index);
+ // TODO(turbofan): More efficient code could be generated here. Consider
+ // that the store will be generic because we don't have a feedback vector
+ // slot.
Node* store = BuildKeyedStore(literal, index, value, VectorSlotPair(),
TypeFeedbackId::None());
states.AddToNode(store, expr->GetIdForElement(array_index),
@@ -2000,7 +2011,7 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
const VectorSlotPair& feedback,
BailoutId bailout_id) {
- DCHECK(expr->IsValidReferenceExpression());
+ DCHECK(expr->IsValidReferenceExpressionOrThis());
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->AsProperty();
@@ -2074,7 +2085,7 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
void AstGraphBuilder::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression());
+ DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->target()->AsProperty();
@@ -2412,11 +2423,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
break;
}
case Call::SUPER_CALL:
- // TODO(dslomov): Implement super calls.
- callee_value = jsgraph()->UndefinedConstant();
- receiver_value = jsgraph()->UndefinedConstant();
- SetStackOverflow();
- break;
+ return VisitCallSuper(expr);
case Call::POSSIBLY_EVAL_CALL:
possibly_eval = true;
if (callee->AsVariableProxy()->var()->IsLookupSlot()) {
@@ -2483,6 +2490,35 @@ void AstGraphBuilder::VisitCall(Call* expr) {
}
+void AstGraphBuilder::VisitCallSuper(Call* expr) {
+ SuperCallReference* super = expr->expression()->AsSuperCallReference();
+ DCHECK_NOT_NULL(super);
+
+ // Prepare the callee to the super call. The super constructor is stored as
+ // the prototype of the constructor we are currently executing.
+ VisitForValue(super->this_function_var());
+ Node* this_function = environment()->Pop();
+ const Operator* op = javascript()->CallRuntime(Runtime::kGetPrototype, 1);
+ Node* super_function = NewNode(op, this_function);
+ // TODO(mstarzinger): This probably needs a proper bailout id.
+ PrepareFrameState(super_function, BailoutId::None());
+ environment()->Push(super_function);
+
+ // Evaluate all arguments to the super call.
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForValues(args);
+
+ // Original constructor is loaded from the {new.target} variable.
+ VisitForValue(super->new_target_var());
+
+ // Create node to perform the super call.
+ const Operator* call = javascript()->CallConstruct(args->length() + 2);
+ Node* value = ProcessArguments(call, args->length() + 2);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(value);
+}
+
+
void AstGraphBuilder::VisitCallNew(CallNew* expr) {
VisitForValue(expr->expression());
@@ -2490,9 +2526,12 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
+ // Original constructor is the same as the callee.
+ environment()->Push(environment()->Peek(args->length()));
+
// Create node to perform the construct call.
- const Operator* call = javascript()->CallConstruct(args->length() + 1);
- Node* value = ProcessArguments(call, args->length() + 1);
+ const Operator* call = javascript()->CallConstruct(args->length() + 2);
+ Node* value = ProcessArguments(call, args->length() + 2);
PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
@@ -2539,9 +2578,7 @@ void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
// TODO(mstarzinger): This bailout is a gigantic hack, the owner is ashamed.
if (function->function_id == Runtime::kInlineGeneratorNext ||
- function->function_id == Runtime::kInlineGeneratorThrow ||
- function->function_id == Runtime::kInlineDefaultConstructorCallSuper ||
- function->function_id == Runtime::kInlineCallSuperWithSpread) {
+ function->function_id == Runtime::kInlineGeneratorThrow) {
ast_context()->ProduceValue(jsgraph()->TheHoleConstant());
return SetStackOverflow();
}
@@ -2577,7 +2614,7 @@ void AstGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpression());
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->expression()->AsProperty();
@@ -2868,8 +2905,8 @@ void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
DeclareGlobalsLanguageMode::encode(language_mode());
Node* flags = jsgraph()->Constant(encoded_flags);
Node* pairs = jsgraph()->Constant(data);
- const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals, 3);
- Node* call = NewNode(op, current_context(), pairs, flags);
+ const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals, 2);
+ Node* call = NewNode(op, pairs, flags);
PrepareFrameState(call, BailoutId::Declarations());
globals()->clear();
}
@@ -2945,7 +2982,7 @@ void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
FrameStateBeforeAndAfter states(this, BeforeId(proxy));
operand =
BuildVariableLoad(proxy->var(), expr->expression()->id(), states, pair,
- OutputFrameStateCombine::Push(), NOT_CONTEXTUAL);
+ OutputFrameStateCombine::Push(), INSIDE_TYPEOF);
} else {
VisitForValue(expr->expression());
operand = environment()->Pop();
@@ -3014,7 +3051,7 @@ VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
DCHECK_EQ(DYNAMIC_GLOBAL, variable->mode());
bool found_eval_scope = false;
- EnumSet<int, uint32_t> check_depths;
+ uint32_t check_depths = 0;
for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
if (s->num_heap_slots() <= 0) continue;
// TODO(mstarzinger): If we have reached an eval scope, we check all
@@ -3026,15 +3063,15 @@ uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
if (depth > DynamicGlobalAccess::kMaxCheckDepth) {
return DynamicGlobalAccess::kFullCheckRequired;
}
- check_depths.Add(depth);
+ check_depths |= 1 << depth;
}
- return check_depths.ToIntegral();
+ return check_depths;
}
uint32_t AstGraphBuilder::ComputeBitsetForDynamicContext(Variable* variable) {
DCHECK_EQ(DYNAMIC_LOCAL, variable->mode());
- EnumSet<int, uint32_t> check_depths;
+ uint32_t check_depths = 0;
for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
if (s->num_heap_slots() <= 0) continue;
if (!s->calls_sloppy_eval() && s != variable->scope()) continue;
@@ -3042,10 +3079,10 @@ uint32_t AstGraphBuilder::ComputeBitsetForDynamicContext(Variable* variable) {
if (depth > DynamicContextAccess::kMaxCheckDepth) {
return DynamicContextAccess::kFullCheckRequired;
}
- check_depths.Add(depth);
+ check_depths |= 1 << depth;
if (s == variable->scope()) break;
}
- return check_depths.ToIntegral();
+ return check_depths;
}
@@ -3218,9 +3255,9 @@ Node* AstGraphBuilder::BuildHoleCheckSilent(Node* value, Node* for_hole,
}
-Node* AstGraphBuilder::BuildHoleCheckThrow(Node* value, Variable* variable,
- Node* not_hole,
- BailoutId bailout_id) {
+Node* AstGraphBuilder::BuildHoleCheckThenThrow(Node* value, Variable* variable,
+ Node* not_hole,
+ BailoutId bailout_id) {
IfBuilder hole_check(this);
Node* the_hole = jsgraph()->TheHoleConstant();
Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
@@ -3235,6 +3272,23 @@ Node* AstGraphBuilder::BuildHoleCheckThrow(Node* value, Variable* variable,
}
+Node* AstGraphBuilder::BuildHoleCheckElseThrow(Node* value, Variable* variable,
+ Node* for_hole,
+ BailoutId bailout_id) {
+ IfBuilder hole_check(this);
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+ hole_check.If(check);
+ hole_check.Then();
+ environment()->Push(for_hole);
+ hole_check.Else();
+ Node* error = BuildThrowReferenceError(variable, bailout_id);
+ environment()->Push(error);
+ hole_check.End();
+ return environment()->Pop();
+}
+
+
Node* AstGraphBuilder::BuildThrowIfStaticPrototype(Node* name,
BailoutId bailout_id) {
IfBuilder prototype_check(this);
@@ -3257,16 +3311,29 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
FrameStateBeforeAndAfter& states,
const VectorSlotPair& feedback,
OutputFrameStateCombine combine,
- ContextualMode contextual_mode) {
+ TypeofMode typeof_mode) {
Node* the_hole = jsgraph()->TheHoleConstant();
VariableMode mode = variable->mode();
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
+ Node* script_context = current_context();
+ int slot_index = -1;
+ if (variable->index() > 0) {
+ DCHECK(variable->IsStaticGlobalObjectProperty());
+ slot_index = variable->index();
+ int depth = current_scope()->ContextChainLength(variable->scope());
+ if (depth > 0) {
+ const Operator* op = javascript()->LoadContext(
+ depth - 1, Context::PREVIOUS_INDEX, true);
+ script_context = NewNode(op, current_context());
+ }
+ }
Node* global = BuildLoadGlobalObject();
Handle<Name> name = variable->name();
- Node* value = BuildGlobalLoad(global, name, feedback, contextual_mode);
+ Node* value = BuildGlobalLoad(script_context, global, name, feedback,
+ typeof_mode, slot_index);
states.AddToNode(value, bailout_id, combine);
return value;
}
@@ -3284,13 +3351,10 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
}
} else if (mode == LET || mode == CONST) {
// Perform check for uninitialized let/const variables.
- // TODO(mstarzinger): For now we cannot use the below optimization for
- // the {this} parameter, because JSConstructStubForDerived magically
- // passes {the_hole} as a receiver.
if (value->op() == the_hole->op()) {
value = BuildThrowReferenceError(variable, bailout_id);
- } else if (value->opcode() == IrOpcode::kPhi || variable->is_this()) {
- value = BuildHoleCheckThrow(value, variable, value, bailout_id);
+ } else if (value->opcode() == IrOpcode::kPhi) {
+ value = BuildHoleCheckThenThrow(value, variable, value, bailout_id);
}
}
return value;
@@ -3311,7 +3375,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
value = BuildHoleCheckSilent(value, undefined, value);
} else if (mode == LET || mode == CONST) {
// Perform check for uninitialized let/const variables.
- value = BuildHoleCheckThrow(value, variable, value, bailout_id);
+ value = BuildHoleCheckThenThrow(value, variable, value, bailout_id);
}
return value;
}
@@ -3322,7 +3386,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
if (mode == DYNAMIC_GLOBAL) {
uint32_t check_bitset = ComputeBitsetForDynamicGlobal(variable);
const Operator* op = javascript()->LoadDynamicGlobal(
- name, check_bitset, feedback, contextual_mode);
+ name, check_bitset, feedback, typeof_mode);
value = NewNode(op, BuildLoadFeedbackVector(), current_context());
states.AddToNode(value, bailout_id, combine);
} else if (mode == DYNAMIC_LOCAL) {
@@ -3342,12 +3406,12 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
value = BuildHoleCheckSilent(value, undefined, value);
} else if (local_mode == LET || local_mode == CONST) {
// Perform check for uninitialized let/const variables.
- value = BuildHoleCheckThrow(value, local, value, bailout_id);
+ value = BuildHoleCheckThenThrow(value, local, value, bailout_id);
}
} else if (mode == DYNAMIC) {
uint32_t check_bitset = DynamicGlobalAccess::kFullCheckRequired;
const Operator* op = javascript()->LoadDynamicGlobal(
- name, check_bitset, feedback, contextual_mode);
+ name, check_bitset, feedback, typeof_mode);
value = NewNode(op, BuildLoadFeedbackVector(), current_context());
states.AddToNode(value, bailout_id, combine);
}
@@ -3404,10 +3468,23 @@ Node* AstGraphBuilder::BuildVariableAssignment(
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
+ Node* script_context = current_context();
+ int slot_index = -1;
+ if (variable->index() > 0) {
+ DCHECK(variable->IsStaticGlobalObjectProperty());
+ slot_index = variable->index();
+ int depth = current_scope()->ContextChainLength(variable->scope());
+ if (depth > 0) {
+ const Operator* op = javascript()->LoadContext(
+ depth - 1, Context::PREVIOUS_INDEX, true);
+ script_context = NewNode(op, current_context());
+ }
+ }
Node* global = BuildLoadGlobalObject();
Handle<Name> name = variable->name();
- Node* store = BuildGlobalStore(global, name, value, feedback,
- TypeFeedbackId::None());
+ Node* store =
+ BuildGlobalStore(script_context, global, name, value, feedback,
+ TypeFeedbackId::None(), slot_index);
states.AddToNode(store, bailout_id, combine);
return store;
}
@@ -3437,7 +3514,15 @@ Node* AstGraphBuilder::BuildVariableAssignment(
if (current->op() == the_hole->op()) {
value = BuildThrowReferenceError(variable, bailout_id);
} else if (value->opcode() == IrOpcode::kPhi) {
- value = BuildHoleCheckThrow(current, variable, value, bailout_id);
+ value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ }
+ } else if (mode == CONST && op == Token::INIT_CONST) {
+ // Perform an initialization check for const {this} variables.
+ // Note that the {this} variable is the only const variable being able
+ // to trigger bind operations outside the TDZ, via {super} calls.
+ Node* current = environment()->Lookup(variable);
+ if (current->op() != the_hole->op() && variable->is_this()) {
+ value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
}
} else if (mode == CONST && op != Token::INIT_CONST) {
// Assignment to const is exception in all modes.
@@ -3445,7 +3530,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
if (current->op() == the_hole->op()) {
return BuildThrowReferenceError(variable, bailout_id);
} else if (value->opcode() == IrOpcode::kPhi) {
- BuildHoleCheckThrow(current, variable, value, bailout_id);
+ BuildHoleCheckThenThrow(current, variable, value, bailout_id);
}
return BuildThrowConstAssignError(bailout_id);
}
@@ -3473,13 +3558,23 @@ Node* AstGraphBuilder::BuildVariableAssignment(
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op, current_context());
- value = BuildHoleCheckThrow(current, variable, value, bailout_id);
+ value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ } else if (mode == CONST && op == Token::INIT_CONST) {
+ // Perform an initialization check for const {this} variables.
+ // Note that the {this} variable is the only const variable being able
+ // to trigger bind operations outside the TDZ, via {super} calls.
+ if (variable->is_this()) {
+ const Operator* op =
+ javascript()->LoadContext(depth, variable->index(), false);
+ Node* current = NewNode(op, current_context());
+ value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
+ }
} else if (mode == CONST && op != Token::INIT_CONST) {
// Assignment to const is exception in all modes.
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op, current_context());
- BuildHoleCheckThrow(current, variable, value, bailout_id);
+ BuildHoleCheckThenThrow(current, variable, value, bailout_id);
return BuildThrowConstAssignError(bailout_id);
}
const Operator* op = javascript()->StoreContext(depth, variable->index());
@@ -3611,23 +3706,25 @@ Node* AstGraphBuilder::BuildNamedSuperStore(Node* receiver, Node* home_object,
}
-Node* AstGraphBuilder::BuildGlobalLoad(Node* object, Handle<Name> name,
+Node* AstGraphBuilder::BuildGlobalLoad(Node* script_context, Node* global,
+ Handle<Name> name,
const VectorSlotPair& feedback,
- ContextualMode mode) {
- const Operator* op =
- javascript()->LoadGlobal(MakeUnique(name), feedback, mode);
- Node* node = NewNode(op, object, BuildLoadFeedbackVector());
+ TypeofMode typeof_mode, int slot_index) {
+ const Operator* op = javascript()->LoadGlobal(MakeUnique(name), feedback,
+ typeof_mode, slot_index);
+ Node* node = NewNode(op, script_context, global, BuildLoadFeedbackVector());
return Record(js_type_feedback_, node, feedback.slot());
}
-Node* AstGraphBuilder::BuildGlobalStore(Node* object, Handle<Name> name,
- Node* value,
+Node* AstGraphBuilder::BuildGlobalStore(Node* script_context, Node* global,
+ Handle<Name> name, Node* value,
const VectorSlotPair& feedback,
- TypeFeedbackId id) {
- const Operator* op =
- javascript()->StoreGlobal(language_mode(), MakeUnique(name), feedback);
- Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
+ TypeFeedbackId id, int slot_index) {
+ const Operator* op = javascript()->StoreGlobal(
+ language_mode(), MakeUnique(name), feedback, slot_index);
+ Node* node =
+ NewNode(op, script_context, global, value, BuildLoadFeedbackVector());
if (FLAG_vector_stores) {
return Record(js_type_feedback_, node, feedback.slot());
}
@@ -3921,7 +4018,7 @@ Node** AstGraphBuilder::EnsureInputBufferSize(int size) {
Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
Node** value_inputs, bool incomplete) {
- DCHECK(op->ValueInputCount() == value_input_count);
+ DCHECK_EQ(op->ValueInputCount(), value_input_count);
bool has_context = OperatorProperties::HasContextInput(op);
int frame_state_count = OperatorProperties::GetFrameStateInputCount(op);
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 07de774c45..bb031ff447 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -284,7 +284,7 @@ class AstGraphBuilder : public AstVisitor {
FrameStateBeforeAndAfter& states,
const VectorSlotPair& feedback,
OutputFrameStateCombine framestate_combine,
- ContextualMode mode = CONTEXTUAL);
+ TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
// Builders for property loads and stores.
Node* BuildKeyedLoad(Node* receiver, Node* key,
@@ -307,10 +307,12 @@ class AstGraphBuilder : public AstVisitor {
const VectorSlotPair& feedback);
// Builders for global variable loads and stores.
- Node* BuildGlobalLoad(Node* global, Handle<Name> name,
- const VectorSlotPair& feedback, ContextualMode mode);
- Node* BuildGlobalStore(Node* global, Handle<Name> name, Node* value,
- const VectorSlotPair& feedback, TypeFeedbackId id);
+ Node* BuildGlobalLoad(Node* script_context, Node* global, Handle<Name> name,
+ const VectorSlotPair& feedback, TypeofMode typeof_mode,
+ int slot_index);
+ Node* BuildGlobalStore(Node* script_context, Node* global, Handle<Name> name,
+ Node* value, const VectorSlotPair& feedback,
+ TypeFeedbackId id, int slot_index);
// Builders for accessing the function context.
Node* BuildLoadBuiltinsObject();
@@ -345,8 +347,10 @@ class AstGraphBuilder : public AstVisitor {
// Builders for dynamic hole-checks at runtime.
Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
- Node* BuildHoleCheckThrow(Node* value, Variable* var, Node* not_hole,
- BailoutId bailout_id);
+ Node* BuildHoleCheckThenThrow(Node* value, Variable* var, Node* not_hole,
+ BailoutId bailout_id);
+ Node* BuildHoleCheckElseThrow(Node* value, Variable* var, Node* for_hole,
+ BailoutId bailout_id);
// Builders for conditional errors.
Node* BuildThrowIfStaticPrototype(Node* name, BailoutId bailout_id);
@@ -385,6 +389,9 @@ class AstGraphBuilder : public AstVisitor {
// Common for all IterationStatement bodies.
void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop);
+ // Dispatched from VisitCall.
+ void VisitCallSuper(Call* expr);
+
// Dispatched from VisitCallRuntime.
void VisitCallJSRuntime(CallRuntime* expr);
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index 61ed4f27cb..4d1d41e122 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -21,7 +21,7 @@ ALAA::AstLoopAssignmentAnalyzer(Zone* zone, CompilationInfo* info)
LoopAssignmentAnalysis* ALAA::Analyze() {
LoopAssignmentAnalysis* a = new (zone()) LoopAssignmentAnalysis(zone());
result_ = a;
- VisitStatements(info()->function()->body());
+ VisitStatements(info()->literal()->body());
result_ = NULL;
return a;
}
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.h b/deps/v8/src/compiler/basic-block-instrumentor.h
index 353b0c2ba7..32dd82ade1 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.h
+++ b/deps/v8/src/compiler/basic-block-instrumentor.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
#define V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
+#include "src/allocation.h"
#include "src/basic-block-profiler.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
new file mode 100644
index 0000000000..76ddd2ed7d
--- /dev/null
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -0,0 +1,228 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+
+#include "src/compiler/linkage.h"
+
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+LinkageLocation regloc(Register reg) {
+ return LinkageLocation::ForRegister(Register::ToAllocationIndex(reg));
+}
+
+
+// Platform-specific configuration for C calling convention.
+#if V8_TARGET_ARCH_IA32
+// ===========================================================================
+// == ia32 ===================================================================
+// ===========================================================================
+#define CALLEE_SAVE_REGISTERS esi.bit() | edi.bit() | ebx.bit()
+
+#elif V8_TARGET_ARCH_X64
+// ===========================================================================
+// == x64 ====================================================================
+// ===========================================================================
+
+#ifdef _WIN64
+// == x64 windows ============================================================
+#define STACK_SHADOW_WORDS 4
+#define PARAM_REGISTERS rcx, rdx, r8, r9
+#define CALLEE_SAVE_REGISTERS \
+ rbx.bit() | rdi.bit() | rsi.bit() | r12.bit() | r13.bit() | r14.bit() | \
+ r15.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ (1 << xmm6.code()) | (1 << xmm7.code()) | (1 << xmm8.code()) | \
+ (1 << xmm9.code()) | (1 << xmm10.code()) | (1 << xmm11.code()) | \
+ (1 << xmm12.code()) | (1 << xmm13.code()) | (1 << xmm14.code()) | \
+ (1 << xmm15.code())
+#else
+// == x64 other ==============================================================
+#define PARAM_REGISTERS rdi, rsi, rdx, rcx, r8, r9
+#define CALLEE_SAVE_REGISTERS \
+ rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit()
+#endif
+
+#elif V8_TARGET_ARCH_X87
+// ===========================================================================
+// == x87 ====================================================================
+// ===========================================================================
+#define CALLEE_SAVE_REGISTERS esi.bit() | edi.bit() | ebx.bit()
+
+#elif V8_TARGET_ARCH_ARM
+// ===========================================================================
+// == arm ====================================================================
+// ===========================================================================
+#define PARAM_REGISTERS r0, r1, r2, r3
+#define CALLEE_SAVE_REGISTERS \
+ r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ (1 << d8.code()) | (1 << d9.code()) | (1 << d10.code()) | \
+ (1 << d11.code()) | (1 << d12.code()) | (1 << d13.code()) | \
+ (1 << d14.code()) | (1 << d15.code())
+
+
+#elif V8_TARGET_ARCH_ARM64
+// ===========================================================================
+// == arm64 ==================================================================
+// ===========================================================================
+#define PARAM_REGISTERS x0, x1, x2, x3, x4, x5, x6, x7
+#define CALLEE_SAVE_REGISTERS \
+ (1 << x19.code()) | (1 << x20.code()) | (1 << x21.code()) | \
+ (1 << x22.code()) | (1 << x23.code()) | (1 << x24.code()) | \
+ (1 << x25.code()) | (1 << x26.code()) | (1 << x27.code()) | \
+ (1 << x28.code()) | (1 << x29.code()) | (1 << x30.code())
+
+
+#define CALLEE_SAVE_FP_REGISTERS \
+ (1 << d8.code()) | (1 << d9.code()) | (1 << d10.code()) | \
+ (1 << d11.code()) | (1 << d12.code()) | (1 << d13.code()) | \
+ (1 << d14.code()) | (1 << d15.code())
+
+#elif V8_TARGET_ARCH_MIPS
+// ===========================================================================
+// == mips ===================================================================
+// ===========================================================================
+#define PARAM_REGISTERS a0, a1, a2, a3
+#define CALLEE_SAVE_REGISTERS \
+ s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
+ s7.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit()
+
+#elif V8_TARGET_ARCH_MIPS64
+// ===========================================================================
+// == mips64 =================================================================
+// ===========================================================================
+#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
+#define CALLEE_SAVE_REGISTERS \
+ s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
+ s7.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit()
+
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+// ===========================================================================
+// == ppc & ppc64 ============================================================
+// ===========================================================================
+#define PARAM_REGISTERS r3, r4, r5, r6, r7, r8, r9, r10
+#define CALLEE_SAVE_REGISTERS \
+ r14.bit() | r15.bit() | r16.bit() | r17.bit() | r18.bit() | r19.bit() | \
+ r20.bit() | r21.bit() | r22.bit() | r23.bit() | r24.bit() | r25.bit() | \
+ r26.bit() | r27.bit() | r28.bit() | r29.bit() | r30.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ d14.bit() | d15.bit() | d16.bit() | d17.bit() | d18.bit() | d19.bit() | \
+ d20.bit() | d21.bit() | d22.bit() | d23.bit() | d24.bit() | d25.bit() | \
+ d26.bit() | d27.bit() | d28.bit() | d29.bit() | d30.bit() | d31.bit()
+
+#else
+// ===========================================================================
+// == unknown ================================================================
+// ===========================================================================
+#define UNSUPPORTED_C_LINKAGE 1
+#endif
+} // namespace
+
+
+// General code uses the above configuration data.
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+ Zone* zone, const MachineSignature* msig) {
+ LocationSignature::Builder locations(zone, msig->return_count(),
+ msig->parameter_count());
+#if 0 // TODO(titzer): instruction selector tests break here.
+ // Check the types of the signature.
+ // Currently no floating point parameters or returns are allowed because
+ // on x87 and ia32, the FP top of stack is involved.
+
+ for (size_t i = 0; i < msig->return_count(); i++) {
+ MachineType type = RepresentationOf(msig->GetReturn(i));
+ CHECK(type != kRepFloat32 && type != kRepFloat64);
+ }
+ for (size_t i = 0; i < msig->parameter_count(); i++) {
+ MachineType type = RepresentationOf(msig->GetParam(i));
+ CHECK(type != kRepFloat32 && type != kRepFloat64);
+ }
+#endif
+
+#ifdef UNSUPPORTED_C_LINKAGE
+ // This method should not be called on unknown architectures.
+ V8_Fatal(__FILE__, __LINE__,
+ "requested C call descriptor on unsupported architecture");
+ return nullptr;
+#endif
+
+ // Add return location(s).
+ CHECK(locations.return_count_ <= 2);
+
+ if (locations.return_count_ > 0) {
+ locations.AddReturn(regloc(kReturnRegister0));
+ }
+ if (locations.return_count_ > 1) {
+ locations.AddReturn(regloc(kReturnRegister1));
+ }
+
+ const int parameter_count = static_cast<int>(msig->parameter_count());
+
+#ifdef PARAM_REGISTERS
+ static const Register kParamRegisters[] = {PARAM_REGISTERS};
+ static const int kParamRegisterCount =
+ static_cast<int>(arraysize(kParamRegisters));
+#else
+ static const Register* kParamRegisters = nullptr;
+ static const int kParamRegisterCount = 0;
+#endif
+
+#ifdef STACK_SHADOW_WORDS
+ int stack_offset = STACK_SHADOW_WORDS;
+#else
+ int stack_offset = 0;
+#endif
+ // Add register and/or stack parameter(s).
+ for (int i = 0; i < parameter_count; i++) {
+ if (i < kParamRegisterCount) {
+ locations.AddParam(regloc(kParamRegisters[i]));
+ } else {
+ locations.AddParam(
+ LinkageLocation::ForCallerFrameSlot(-1 - stack_offset));
+ stack_offset++;
+ }
+ }
+
+#ifdef CALLEE_SAVE_REGISTERS
+ const RegList kCalleeSaveRegisters = CALLEE_SAVE_REGISTERS;
+#else
+ const RegList kCalleeSaveRegisters = 0;
+#endif
+
+#ifdef CALLEE_SAVE_FP_REGISTERS
+ const RegList kCalleeSaveFPRegisters = CALLEE_SAVE_FP_REGISTERS;
+#else
+ const RegList kCalleeSaveFPRegisters = 0;
+#endif
+
+ // The target for C calls is always an address (i.e. machine pointer).
+ MachineType target_type = kMachPtr;
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallAddress, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ msig, // machine_sig
+ locations.Build(), // location_sig
+ 0, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ CallDescriptor::kNoFlags, // flags
+ "c-call");
+}
+}
+}
+}
diff --git a/deps/v8/src/compiler/coalesced-live-ranges.cc b/deps/v8/src/compiler/coalesced-live-ranges.cc
index e81f5518bd..44dd336c83 100644
--- a/deps/v8/src/compiler/coalesced-live-ranges.cc
+++ b/deps/v8/src/compiler/coalesced-live-ranges.cc
@@ -10,136 +10,131 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define TRACE(...) \
- do { \
- if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
- } while (false)
+LiveRangeConflictIterator::LiveRangeConflictIterator(const LiveRange* range,
+ IntervalStore* storage)
+ : query_(range->first_interval()),
+ pos_(storage->end()),
+ intervals_(storage) {
+ MovePosAndQueryToFirstConflict();
+}
-const float CoalescedLiveRanges::kAllocatedRangeMultiplier = 10.0;
-void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
- UpdateWeightAtAllocation(range);
- for (auto interval = range->first_interval(); interval != nullptr;
- interval = interval->next()) {
- storage().insert({interval->start(), interval->end(), range});
- }
+LiveRange* LiveRangeConflictIterator::Current() const {
+ if (IsFinished()) return nullptr;
+ return pos_->range_;
}
-void CoalescedLiveRanges::Remove(LiveRange* range) {
- for (auto interval = range->first_interval(); interval != nullptr;
- interval = interval->next()) {
- storage().erase({interval->start(), interval->end(), nullptr});
- }
- range->UnsetAssignedRegister();
-}
+void LiveRangeConflictIterator::MovePosToFirstConflictForQuery() {
+ DCHECK(query_ != nullptr);
+ auto end = intervals_->end();
+ LifetimePosition q_start = query_->start();
+ LifetimePosition q_end = query_->end();
+ if (intervals_->empty() || intervals_->rbegin()->end_ <= q_start ||
+ intervals_->begin()->start_ >= q_end) {
+ pos_ = end;
+ return;
+ }
-float CoalescedLiveRanges::GetMaximumConflictingWeight(
- const LiveRange* range) const {
- float ret = LiveRange::kInvalidWeight;
- auto end = storage().end();
- for (auto query = range->first_interval(); query != nullptr;
- query = query->next()) {
- auto conflict = GetFirstConflict(query);
-
- if (conflict == end) continue;
- for (; QueryIntersectsAllocatedInterval(query, conflict); ++conflict) {
- // It is possible we'll visit the same range multiple times, because
- // successive (not necessarily consecutive) intervals belong to the same
- // range, or because different intervals of the query range have the same
- // range as conflict.
- DCHECK_NE(conflict->range->weight(), LiveRange::kInvalidWeight);
- ret = Max(ret, conflict->range->weight());
- if (ret == LiveRange::kMaxWeight) break;
+ pos_ = intervals_->upper_bound(AsAllocatedInterval(q_start));
+ // pos is either at the end (no start strictly greater than q_start) or
+ // at some position with the aforementioned property. In either case, the
+ // allocated interval before this one may intersect our query:
+ // either because, although it starts before this query's start, it ends
+ // after; or because it starts exactly at the query start. So unless we're
+ // right at the beginning of the storage - meaning the first allocated
+ // interval is also starting after this query's start - see what's behind.
+ if (pos_ != intervals_->begin()) {
+ --pos_;
+ if (!QueryIntersectsAllocatedInterval()) {
+ // The interval behind wasn't intersecting, so move back.
+ ++pos_;
}
}
- return ret;
+ if (pos_ == end || !QueryIntersectsAllocatedInterval()) {
+ pos_ = end;
+ }
}
-void CoalescedLiveRanges::EvictAndRescheduleConflicts(
- LiveRange* range, AllocationScheduler* scheduler) {
- auto end = storage().end();
-
- for (auto query = range->first_interval(); query != nullptr;
- query = query->next()) {
- auto conflict = GetFirstConflict(query);
- if (conflict == end) continue;
- while (QueryIntersectsAllocatedInterval(query, conflict)) {
- LiveRange* range_to_evict = conflict->range;
- // Bypass successive intervals belonging to the same range, because we're
- // about to remove this range, and we don't want the storage iterator to
- // become invalid.
- while (conflict != end && conflict->range == range_to_evict) {
- ++conflict;
- }
-
- DCHECK(range_to_evict->HasRegisterAssigned());
- CHECK(!range_to_evict->IsFixed());
- Remove(range_to_evict);
- UpdateWeightAtEviction(range_to_evict);
- TRACE("Evicted range %d.\n", range_to_evict->id());
- scheduler->Schedule(range_to_evict);
+void LiveRangeConflictIterator::MovePosAndQueryToFirstConflict() {
+ auto end = intervals_->end();
+ for (; query_ != nullptr; query_ = query_->next()) {
+ MovePosToFirstConflictForQuery();
+ if (pos_ != end) {
+ DCHECK(QueryIntersectsAllocatedInterval());
+ return;
}
}
+
+ Invalidate();
}
-bool CoalescedLiveRanges::VerifyAllocationsAreValid() const {
- LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
- for (auto i : storage_) {
- if (i.start < last_end) {
- return false;
- }
- last_end = i.end;
+void LiveRangeConflictIterator::IncrementPosAndSkipOverRepetitions() {
+ auto end = intervals_->end();
+ DCHECK(pos_ != end);
+ LiveRange* current_conflict = Current();
+ while (pos_ != end && pos_->range_ == current_conflict) {
+ ++pos_;
}
- return true;
}
-void CoalescedLiveRanges::UpdateWeightAtAllocation(LiveRange* range) {
- DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
- range->set_weight(range->weight() * kAllocatedRangeMultiplier);
+LiveRange* LiveRangeConflictIterator::InternalGetNext(bool clean_behind) {
+ if (IsFinished()) return nullptr;
+
+ LiveRange* to_clear = Current();
+ IncrementPosAndSkipOverRepetitions();
+ // At this point, pos_ is either at the end, or on an interval that doesn't
+ // correspond to the same range as to_clear. This interval may not even be
+ // a conflict.
+ if (clean_behind) {
+ // Since we parked pos_ on an iterator that won't be affected by removal,
+ // we can safely delete to_clear's intervals.
+ for (auto interval = to_clear->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ AllocatedInterval erase_key(interval->start(), interval->end(), nullptr);
+ intervals_->erase(erase_key);
+ }
+ }
+ // We may have parked pos_ at the end, or on a non-conflict. In that case,
+ // move to the next query and reinitialize pos and query. This may invalidate
+ // the iterator, if no more conflicts are available.
+ if (!QueryIntersectsAllocatedInterval()) {
+ query_ = query_->next();
+ MovePosAndQueryToFirstConflict();
+ }
+ return Current();
}
-void CoalescedLiveRanges::UpdateWeightAtEviction(LiveRange* range) {
- DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
- range->set_weight(range->weight() / kAllocatedRangeMultiplier);
+LiveRangeConflictIterator CoalescedLiveRanges::GetConflicts(
+ const LiveRange* range) {
+ return LiveRangeConflictIterator(range, &intervals());
}
-CoalescedLiveRanges::interval_iterator CoalescedLiveRanges::GetFirstConflict(
- const UseInterval* query) const {
- DCHECK(query != nullptr);
- auto end = storage().end();
- LifetimePosition q_start = query->start();
- LifetimePosition q_end = query->end();
-
- if (storage().empty() || storage().rbegin()->end <= q_start ||
- storage().begin()->start >= q_end) {
- return end;
+void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
+ for (auto interval = range->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ AllocatedInterval to_insert(interval->start(), interval->end(), range);
+ intervals().insert(to_insert);
}
+}
- auto ret = storage().upper_bound(AsAllocatedInterval(q_start));
- // ret is either at the end (no start strictly greater than q_start) or
- // at some position with the aforementioned property. In either case, the
- // allocated interval before this one may intersect our query:
- // either because, although it starts before this query's start, it ends
- // after; or because it starts exactly at the query start. So unless we're
- // right at the beginning of the storage - meaning the first allocated
- // interval is also starting after this query's start - see what's behind.
- if (ret != storage().begin()) {
- --ret;
- if (!QueryIntersectsAllocatedInterval(query, ret)) {
- // The interval behind wasn't intersecting, so move back.
- ++ret;
+
+bool CoalescedLiveRanges::VerifyAllocationsAreValidForTesting() const {
+ LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
+ for (auto i : intervals_) {
+ if (i.start_ < last_end) {
+ return false;
}
+ last_end = i.end_;
}
- if (ret != end && QueryIntersectsAllocatedInterval(query, ret)) return ret;
- return end;
+ return true;
}
diff --git a/deps/v8/src/compiler/coalesced-live-ranges.h b/deps/v8/src/compiler/coalesced-live-ranges.h
index f12517203f..e617c0a251 100644
--- a/deps/v8/src/compiler/coalesced-live-ranges.h
+++ b/deps/v8/src/compiler/coalesced-live-ranges.h
@@ -13,8 +13,96 @@ namespace internal {
namespace compiler {
-class AllocationScheduler;
+// Implementation detail for CoalescedLiveRanges.
+struct AllocatedInterval {
+ AllocatedInterval(LifetimePosition start, LifetimePosition end,
+ LiveRange* range)
+ : start_(start), end_(end), range_(range) {}
+
+ LifetimePosition start_;
+ LifetimePosition end_;
+ LiveRange* range_;
+ bool operator<(const AllocatedInterval& other) const {
+ return start_ < other.start_;
+ }
+ bool operator>(const AllocatedInterval& other) const {
+ return start_ > other.start_;
+ }
+};
+typedef ZoneSet<AllocatedInterval> IntervalStore;
+
+
+// An iterator over conflicts of a live range, obtained from CoalescedLiveRanges
+// The design supports two main scenarios (see GreedyAllocator):
+// (1) observing each conflicting range, without mutating the allocations, and
+// (2) observing each conflicting range, and then moving to the next, after
+// removing the current conflict.
+class LiveRangeConflictIterator {
+ public:
+ // Current conflict. nullptr if no conflicts, or if we reached the end of
+ // conflicts.
+ LiveRange* Current() const;
+
+ // Get the next conflict. Caller should handle non-consecutive repetitions of
+ // the same range.
+ LiveRange* GetNext() { return InternalGetNext(false); }
+
+ // Get the next conflict, after evicting the current one. Caller may expect
+ // to never observe the same live range more than once.
+ LiveRange* RemoveCurrentAndGetNext() { return InternalGetNext(true); }
+
+ private:
+ friend class CoalescedLiveRanges;
+
+ typedef IntervalStore::const_iterator interval_iterator;
+ LiveRangeConflictIterator(const LiveRange* range, IntervalStore* store);
+
+ // Move the store iterator to first interval intersecting query. Since the
+ // intervals are sorted, subsequent intervals intersecting query follow. May
+ // leave the store iterator at "end", meaning that the current query does not
+ // have an intersection.
+ void MovePosToFirstConflictForQuery();
+
+ // Move both query and store iterator to the first intersection, if any. If
+ // none, then it invalidates the iterator (IsFinished() == true)
+ void MovePosAndQueryToFirstConflict();
+ // Increment pos and skip over intervals belonging to the same range we
+ // started with (i.e. Current() before the call). It is possible that range
+ // will be seen again, but not consecutively.
+ void IncrementPosAndSkipOverRepetitions();
+
+ // Common implementation used by both GetNext as well as
+ // RemoveCurrentAndGetNext.
+ LiveRange* InternalGetNext(bool clean_behind);
+
+ bool IsFinished() const { return query_ == nullptr; }
+
+ static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
+ return AllocatedInterval(pos, LifetimePosition::Invalid(), nullptr);
+ }
+
+ // Intersection utilities.
+ static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
+ LifetimePosition b_start, LifetimePosition b_end) {
+ return a_start < b_end && b_start < a_end;
+ }
+
+ bool QueryIntersectsAllocatedInterval() const {
+ DCHECK(query_ != nullptr);
+ return pos_ != intervals_->end() &&
+ Intersects(query_->start(), query_->end(), pos_->start_, pos_->end_);
+ }
+
+ void Invalidate() {
+ query_ = nullptr;
+ pos_ = intervals_->end();
+ }
+
+ const UseInterval* query_;
+ interval_iterator pos_;
+ IntervalStore* intervals_;
+};
// Collection of live ranges allocated to the same register.
// It supports efficiently finding all conflicts for a given, non-allocated
@@ -30,45 +118,27 @@ class AllocationScheduler;
// traversal of conflicts.
class CoalescedLiveRanges : public ZoneObject {
public:
- explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {}
- void clear() { storage_.clear(); }
+ explicit CoalescedLiveRanges(Zone* zone) : intervals_(zone) {}
+ void clear() { intervals_.clear(); }
- bool empty() const { return storage_.empty(); }
+ bool empty() const { return intervals_.empty(); }
- // Returns kInvalidWeight if there are no conflicts, or the largest weight of
- // a range conflicting with the given range.
- float GetMaximumConflictingWeight(const LiveRange* range) const;
+ // Iterate over each live range conflicting with the provided one.
+ // The same live range may be observed multiple, but non-consecutive, times.
+ LiveRangeConflictIterator GetConflicts(const LiveRange* range);
- // Evicts all conflicts of the given range, and reschedules them with the
- // provided scheduler.
- void EvictAndRescheduleConflicts(LiveRange* range,
- AllocationScheduler* scheduler);
// Allocates a range with a pre-calculated candidate weight.
void AllocateRange(LiveRange* range);
- // TODO(mtrofin): remove this in favor of comprehensive unit tests.
- bool VerifyAllocationsAreValid() const;
+ // Unit testing API, verifying that allocated intervals do not overlap.
+ bool VerifyAllocationsAreValidForTesting() const;
private:
static const float kAllocatedRangeMultiplier;
- // Storage detail for CoalescedLiveRanges.
- struct AllocatedInterval {
- LifetimePosition start;
- LifetimePosition end;
- LiveRange* range;
- bool operator<(const AllocatedInterval& other) const {
- return start < other.start;
- }
- bool operator>(const AllocatedInterval& other) const {
- return start > other.start;
- }
- };
- typedef ZoneSet<AllocatedInterval> IntervalStore;
- typedef IntervalStore::const_iterator interval_iterator;
- IntervalStore& storage() { return storage_; }
- const IntervalStore& storage() const { return storage_; }
+ IntervalStore& intervals() { return intervals_; }
+ const IntervalStore& intervals() const { return intervals_; }
// Augment the weight of a range that is about to be allocated.
static void UpdateWeightAtAllocation(LiveRange* range);
@@ -76,29 +146,8 @@ class CoalescedLiveRanges : public ZoneObject {
// Reduce the weight of a range that has lost allocation.
static void UpdateWeightAtEviction(LiveRange* range);
- // Intersection utilities.
- static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
- LifetimePosition b_start, LifetimePosition b_end) {
- return a_start < b_end && b_start < a_end;
- }
- static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
- return {pos, LifetimePosition::Invalid(), nullptr};
- }
-
- bool QueryIntersectsAllocatedInterval(const UseInterval* query,
- interval_iterator& pos) const {
- DCHECK(query != nullptr);
- return pos != storage().end() &&
- Intersects(query->start(), query->end(), pos->start, pos->end);
- }
-
- void Remove(LiveRange* range);
-
- // Get the first interval intersecting query. Since the intervals are sorted,
- // subsequent intervals intersecting query follow.
- interval_iterator GetFirstConflict(const UseInterval* query) const;
- IntervalStore storage_;
+ IntervalStore intervals_;
DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
};
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 2903c3d370..07a741f73e 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -7,6 +7,7 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
+#include "src/frames-inl.h"
#include "src/snapshot/serialize.h" // TODO(turbofan): RootIndexMap
namespace v8 {
@@ -215,10 +216,16 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
Safepoint::DeoptMode deopt_mode) {
Safepoint safepoint =
safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
+ int stackSlotToSpillSlotDelta =
+ frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
for (auto& operand : references->reference_operands()) {
if (operand.IsStackSlot()) {
- safepoint.DefinePointerSlot(StackSlotOperand::cast(operand).index(),
- zone());
+ int index = StackSlotOperand::cast(operand).index();
+ DCHECK(index >= 0);
+ // Safepoint table indices are 0-based from the beginning of the spill
+ // slot area, adjust appropriately.
+ index -= stackSlotToSpillSlotDelta;
+ safepoint.DefinePointerSlot(index, zone());
} else if (operand.IsRegister() && (kind & Safepoint::kWithRegisters)) {
Register reg =
Register::FromAllocationIndex(RegisterOperand::cast(operand).index());
@@ -231,7 +238,8 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
bool CodeGenerator::IsMaterializableFromFrame(Handle<HeapObject> object,
int* offset_return) {
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
- if (object.is_identical_to(info()->context()) && !info()->is_osr()) {
+ if (info()->has_context() && object.is_identical_to(info()->context()) &&
+ !info()->is_osr()) {
*offset_return = StandardFrameConstants::kContextOffset;
return true;
} else if (object.is_identical_to(info()->closure())) {
@@ -245,7 +253,9 @@ bool CodeGenerator::IsMaterializableFromFrame(Handle<HeapObject> object,
bool CodeGenerator::IsMaterializableFromRoot(
Handle<HeapObject> object, Heap::RootListIndex* index_return) {
- if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+ const CallDescriptor* incoming_descriptor =
+ linkage()->GetIncomingDescriptor();
+ if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
RootIndexMap map(isolate());
int root_index = map.Lookup(*object);
if (root_index != RootIndexMap::kInvalidRootIndex) {
@@ -528,6 +538,7 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
Handle<SharedFunctionInfo> shared_info;
if (!descriptor->shared_info().ToHandle(&shared_info)) {
+ if (!info()->has_shared_info()) return; // Stub with no SharedFunctionInfo.
shared_info = info()->shared_info();
}
int shared_info_id = DefineDeoptimizationLiteral(shared_info);
@@ -652,61 +663,6 @@ void CodeGenerator::MarkLazyDeoptSite() {
last_lazy_deopt_pc_ = masm()->pc_offset();
}
-#if !V8_TURBOFAN_BACKEND
-
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
- UNIMPLEMENTED();
-}
-
-
-void CodeGenerator::AssembleArchBranch(Instruction* instr,
- BranchInfo* branch) {
- UNIMPLEMENTED();
-}
-
-
-void CodeGenerator::AssembleArchBoolean(Instruction* instr,
- FlagsCondition condition) {
- UNIMPLEMENTED();
-}
-
-
-void CodeGenerator::AssembleArchJump(RpoNumber target) { UNIMPLEMENTED(); }
-
-
-void CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
- UNIMPLEMENTED();
-}
-
-
-void CodeGenerator::AssemblePrologue() { UNIMPLEMENTED(); }
-
-
-void CodeGenerator::AssembleReturn() { UNIMPLEMENTED(); }
-
-
-void CodeGenerator::AssembleMove(InstructionOperand* source,
- InstructionOperand* destination) {
- UNIMPLEMENTED();
-}
-
-
-void CodeGenerator::AssembleSwap(InstructionOperand* source,
- InstructionOperand* destination) {
- UNIMPLEMENTED();
-}
-
-
-void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); }
-
-
-void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
- UNIMPLEMENTED();
-}
-
-#endif // !V8_TURBOFAN_BACKEND
-
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: masm_(gen->masm()), next_(gen->ools_) {
diff --git a/deps/v8/src/compiler/common-node-cache.cc b/deps/v8/src/compiler/common-node-cache.cc
index ee1fa0ff24..b005c952dd 100644
--- a/deps/v8/src/compiler/common-node-cache.cc
+++ b/deps/v8/src/compiler/common-node-cache.cc
@@ -5,6 +5,8 @@
#include "src/compiler/common-node-cache.h"
#include "src/assembler.h"
+#include "src/compiler/node.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index ac1f754575..a809cc8aab 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -776,9 +776,11 @@ const Operator* CommonOperatorBuilder::ResizeMergeOrPhi(const Operator* op,
const FrameStateFunctionInfo*
CommonOperatorBuilder::CreateFrameStateFunctionInfo(
FrameStateType type, int parameter_count, int local_count,
- Handle<SharedFunctionInfo> shared_info) {
+ Handle<SharedFunctionInfo> shared_info,
+ ContextCallingMode context_calling_mode) {
return new (zone()->New(sizeof(FrameStateFunctionInfo)))
- FrameStateFunctionInfo(type, parameter_count, local_count, shared_info);
+ FrameStateFunctionInfo(type, parameter_count, local_count, shared_info,
+ context_calling_mode);
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index d9e5f85b9e..cc2ae22935 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -163,7 +163,8 @@ class CommonOperatorBuilder final : public ZoneObject {
// Constructs function info for frame state construction.
const FrameStateFunctionInfo* CreateFrameStateFunctionInfo(
FrameStateType type, int parameter_count, int local_count,
- Handle<SharedFunctionInfo> shared_info);
+ Handle<SharedFunctionInfo> shared_info,
+ ContextCallingMode context_calling_mode);
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 76d6749d0f..7170a845f7 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -4,6 +4,7 @@
#include "src/base/functional.h"
#include "src/compiler/frame-states.h"
+#include "src/handles-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index 42c41f9107..0684f112aa 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -5,10 +5,15 @@
#ifndef V8_COMPILER_FRAME_STATES_H_
#define V8_COMPILER_FRAME_STATES_H_
-#include "src/handles-inl.h"
+#include "src/handles.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class SharedFunctionInfo;
+
namespace compiler {
// Flag that describes how to combine the current environment with
@@ -76,26 +81,38 @@ enum class FrameStateType {
};
+enum ContextCallingMode {
+ CALL_MAINTAINS_NATIVE_CONTEXT,
+ CALL_CHANGES_NATIVE_CONTEXT
+};
+
+
class FrameStateFunctionInfo {
public:
FrameStateFunctionInfo(FrameStateType type, int parameter_count,
int local_count,
- Handle<SharedFunctionInfo> shared_info)
+ Handle<SharedFunctionInfo> shared_info,
+ ContextCallingMode context_calling_mode)
: type_(type),
parameter_count_(parameter_count),
local_count_(local_count),
- shared_info_(shared_info) {}
+ shared_info_(shared_info),
+ context_calling_mode_(context_calling_mode) {}
int local_count() const { return local_count_; }
int parameter_count() const { return parameter_count_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
FrameStateType type() const { return type_; }
+ ContextCallingMode context_calling_mode() const {
+ return context_calling_mode_;
+ }
private:
FrameStateType const type_;
int const parameter_count_;
int const local_count_;
Handle<SharedFunctionInfo> const shared_info_;
+ ContextCallingMode context_calling_mode_;
};
diff --git a/deps/v8/src/compiler/frame.cc b/deps/v8/src/compiler/frame.cc
new file mode 100644
index 0000000000..079fccb71c
--- /dev/null
+++ b/deps/v8/src/compiler/frame.cc
@@ -0,0 +1,24 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/frame.h"
+
+#include "src/compiler/linkage.h"
+#include "src/compiler/register-allocator.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Frame::Frame(int fixed_frame_size_in_slots)
+ : frame_slot_count_(fixed_frame_size_in_slots),
+ spilled_callee_register_slot_count_(0),
+ stack_slot_count_(0),
+ allocated_registers_(NULL),
+ allocated_double_registers_(NULL) {}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 2850a8c1a1..aa823b6ba8 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -6,25 +6,93 @@
#define V8_COMPILER_FRAME_H_
#include "src/bit-vector.h"
+#include "src/frames.h"
namespace v8 {
namespace internal {
namespace compiler {
-// Collects the spill slot requirements and the allocated general and double
-// registers for a compiled function. Frames are usually populated by the
-// register allocator and are used by Linkage to generate code for the prologue
-// and epilogue to compiled code.
+// Collects the spill slot and other frame slot requirements for a compiled
+// function. Frames are usually populated by the register allocator and are used
+// by Linkage to generate code for the prologue and epilogue to compiled code.
+//
+// Frames are divided up into three regions. The first is the fixed header,
+// which always has a constant size and can be predicted before code generation
+// begins depending on the type of code being generated. The second is the
+// region for spill slots, which is immediately below the fixed header and grows
+// as the register allocator needs to spill to the stack and asks the frame for
+// more space. The third region, which contains the callee-saved registers must
+// be reserved after register allocation, since its size can only be precisely
+// determined after register allocation once the number of used callee-saved
+// register is certain.
+//
+// Every pointer in a frame has a slot id. On 32-bit platforms, doubles consume
+// two slots.
+//
+// Stack slot indices >= 0 access the callee stack with slot 0 corresponding to
+// the callee's saved return address and 1 corresponding to the saved frame
+// pointer. Some frames have additional information stored in the fixed header,
+// for example JSFunctions store the function context and marker in the fixed
+// header, with slot index 2 corresponding to the current function context and 3
+// corresponding to the frame marker/JSFunction. The frame region immediately
+// below the fixed header contains spill slots starting at 4 for JSFunctions. The
+// callee-saved frame region below that starts at 4+spilled_slot_count. Callee
+// stack slots corresponding to parameters are accessible through negative slot
+// ids.
+//
+// Every slot of a caller or callee frame is accessible by the register
+// allocator and gap resolver with a SpillSlotOperand containing its
+// corresponding slot id.
+//
+// Below an example JSFunction Frame with slot ids, frame regions and contents:
+//
+// slot JS frame
+// +-----------------+----------------------------
+// -n-1 | parameter 0 | ^
+// |- - - - - - - - -| |
+// -n | | Caller
+// ... | ... | frame slots
+// -2 | parameter n-1 | (slot < 0)
+// |- - - - - - - - -| |
+// -1 | parameter n | v
+// -----+-----------------+----------------------------
+// 0 | return addr | ^ ^
+// |- - - - - - - - -| | |
+// 1 | saved frame ptr | Fixed |
+// |- - - - - - - - -| Header <-- frame ptr |
+// 2 | Context | | |
+// |- - - - - - - - -| | |
+// 3 |JSFunction/Marker| v |
+// +-----------------+---- |
+// 4 | spill 1 | ^ Callee
+// |- - - - - - - - -| | frame slots
+// ... | ... | Spill slots (slot >= 0)
+// |- - - - - - - - -| | |
+// m+4 | spill m | v |
+// +-----------------+---- |
+// m+5 | callee-saved 1 | ^ |
+// |- - - - - - - - -| | |
+// | ... | Callee-saved |
+// |- - - - - - - - -| | |
+// m+r+4 | callee-saved r | v v
+// -----+-----------------+----- <-- stack ptr ---------
+//
class Frame : public ZoneObject {
public:
- Frame()
- : register_save_area_size_(0),
- spill_slot_count_(0),
- osr_stack_slot_count_(0),
- allocated_registers_(NULL),
- allocated_double_registers_(NULL) {}
+ explicit Frame(int fixed_frame_size_in_slots);
- inline int GetSpillSlotCount() { return spill_slot_count_; }
+ inline int GetTotalFrameSlotCount() { return frame_slot_count_; }
+
+ inline int GetSavedCalleeRegisterSlotCount() {
+ return spilled_callee_register_slot_count_;
+ }
+ inline int GetSpillSlotCount() { return stack_slot_count_; }
+
+ inline void SetElidedFrameSizeInSlots(int slots) {
+ DCHECK_EQ(0, spilled_callee_register_slot_count_);
+ DCHECK_EQ(0, stack_slot_count_);
+ frame_slot_count_ = slots;
+ }
void SetAllocatedRegisters(BitVector* regs) {
DCHECK(allocated_registers_ == NULL);
@@ -40,41 +108,50 @@ class Frame : public ZoneObject {
return !allocated_double_registers_->IsEmpty();
}
- void SetRegisterSaveAreaSize(int size) {
- DCHECK(IsAligned(size, kPointerSize));
- register_save_area_size_ = size;
+ int AlignSavedCalleeRegisterSlots() {
+ DCHECK_EQ(0, spilled_callee_register_slot_count_);
+ int frame_slot_count_before = frame_slot_count_;
+ frame_slot_count_ = RoundUp(frame_slot_count_, 2);
+ return frame_slot_count_before - frame_slot_count_;
}
- int GetRegisterSaveAreaSize() { return register_save_area_size_; }
+ void AllocateSavedCalleeRegisterSlots(int count) {
+ frame_slot_count_ += count;
+ spilled_callee_register_slot_count_ += count;
+ }
- // OSR stack slots, including locals and expression stack slots.
- void SetOsrStackSlotCount(int slots) {
- DCHECK(slots >= 0);
- osr_stack_slot_count_ = slots;
+ int AllocateSpillSlot(int width) {
+ DCHECK_EQ(0, spilled_callee_register_slot_count_);
+ int frame_slot_count_before = frame_slot_count_;
+ int slot = AllocateAlignedFrameSlot(width);
+ stack_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
+ return slot;
}
- int GetOsrStackSlotCount() { return osr_stack_slot_count_; }
+ int ReserveSpillSlots(size_t slot_count) {
+ DCHECK_EQ(0, spilled_callee_register_slot_count_);
+ DCHECK_EQ(0, stack_slot_count_);
+ stack_slot_count_ += static_cast<int>(slot_count);
+ frame_slot_count_ += static_cast<int>(slot_count);
+ return frame_slot_count_ - 1;
+ }
- int AllocateSpillSlot(int width) {
+ private:
+ int AllocateAlignedFrameSlot(int width) {
DCHECK(width == 4 || width == 8);
// Skip one slot if necessary.
if (width > kPointerSize) {
DCHECK(width == kPointerSize * 2);
- spill_slot_count_++;
- spill_slot_count_ |= 1;
+ frame_slot_count_++;
+ frame_slot_count_ |= 1;
}
- return spill_slot_count_++;
- }
-
- void ReserveSpillSlots(size_t slot_count) {
- DCHECK_EQ(0, spill_slot_count_); // can only reserve before allocation.
- spill_slot_count_ = static_cast<int>(slot_count);
+ return frame_slot_count_++;
}
private:
- int register_save_area_size_;
- int spill_slot_count_;
- int osr_stack_slot_count_;
+ int frame_slot_count_;
+ int spilled_callee_register_slot_count_;
+ int stack_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
diff --git a/deps/v8/src/compiler/graph-builder.h b/deps/v8/src/compiler/graph-builder.h
deleted file mode 100644
index f2fb7f6c09..0000000000
--- a/deps/v8/src/compiler/graph-builder.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GRAPH_BUILDER_H_
-#define V8_COMPILER_GRAPH_BUILDER_H_
-
-#include "src/allocation.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
-#include "src/unique.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// A common base class for anything that creates nodes in a graph.
-class GraphBuilder {
- public:
- GraphBuilder(Isolate* isolate, Graph* graph)
- : isolate_(isolate), graph_(graph) {}
- virtual ~GraphBuilder() {}
-
- Node* NewNode(const Operator* op, bool incomplete = false) {
- return MakeNode(op, 0, static_cast<Node**>(NULL), incomplete);
- }
-
- Node* NewNode(const Operator* op, Node* n1) {
- return MakeNode(op, 1, &n1, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2) {
- Node* buffer[] = {n1, n2};
- return MakeNode(op, arraysize(buffer), buffer, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
- Node* buffer[] = {n1, n2, n3};
- return MakeNode(op, arraysize(buffer), buffer, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
- Node* buffer[] = {n1, n2, n3, n4};
- return MakeNode(op, arraysize(buffer), buffer, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5) {
- Node* buffer[] = {n1, n2, n3, n4, n5};
- return MakeNode(op, arraysize(buffer), buffer, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6};
- return MakeNode(op, arraysize(nodes), nodes, false);
- }
-
- Node* NewNode(const Operator* op, int value_input_count, Node** value_inputs,
- bool incomplete = false) {
- return MakeNode(op, value_input_count, value_inputs, incomplete);
- }
-
- Isolate* isolate() const { return isolate_; }
- Graph* graph() const { return graph_; }
-
- protected:
- // Base implementation used by all factory methods.
- virtual Node* MakeNode(const Operator* op, int value_input_count,
- Node** value_inputs, bool incomplete) = 0;
-
- private:
- Isolate* isolate_;
- Graph* graph_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_GRAPH_BUILDER_H__
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 313edb9b65..bf3d8edea4 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -28,7 +28,7 @@ namespace compiler {
FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
const char* suffix, const char* mode) {
EmbeddedVector<char, 256> filename(0);
- SmartArrayPointer<char> function_name;
+ base::SmartArrayPointer<char> function_name;
if (info->has_shared_info()) {
function_name = info->shared_info()->DebugName()->ToCString();
if (strlen(function_name.get()) > 0) {
@@ -492,7 +492,7 @@ void GraphC1Visualizer::PrintIntProperty(const char* name, int value) {
void GraphC1Visualizer::PrintCompilation(const CompilationInfo* info) {
Tag tag(this, "compilation");
if (info->IsOptimizing()) {
- Handle<String> name = info->function()->debug_name();
+ Handle<String> name = info->literal()->debug_name();
PrintStringProperty("name", name->ToCString().get());
PrintIndent();
os_ << "method \"" << name->ToCString().get() << ":"
diff --git a/deps/v8/src/compiler/greedy-allocator.cc b/deps/v8/src/compiler/greedy-allocator.cc
index 8d658c39ff..2da30bd289 100644
--- a/deps/v8/src/compiler/greedy-allocator.cc
+++ b/deps/v8/src/compiler/greedy-allocator.cc
@@ -9,12 +9,16 @@ namespace v8 {
namespace internal {
namespace compiler {
+
#define TRACE(...) \
do { \
if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
} while (false)
+const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;
+
+
namespace {
@@ -131,12 +135,10 @@ void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
DCHECK(!range->HasRegisterAssigned());
- current_allocations(reg_id)->AllocateRange(range);
+ AllocateRegisterToRange(reg_id, range);
TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id());
range->set_assigned_register(reg_id);
-
- DCHECK(current_allocations(reg_id)->VerifyAllocationsAreValid());
}
@@ -153,7 +155,7 @@ void GreedyAllocator::PreallocateFixedRanges() {
int reg_nr = fixed_range->assigned_register();
EnsureValidRangeWeight(fixed_range);
- current_allocations(reg_nr)->AllocateRange(fixed_range);
+ AllocateRegisterToRange(reg_nr, fixed_range);
}
}
}
@@ -190,8 +192,7 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
// where the maximum conflict is lower than the candidate's weight, the one
// with the smallest such weight.
for (int i = 0; i < num_registers(); i++) {
- float max_conflict_weight =
- current_allocations(i)->GetMaximumConflictingWeight(range);
+ float max_conflict_weight = GetMaximumConflictingWeight(i, range);
if (max_conflict_weight == LiveRange::kInvalidWeight) {
free_reg = i;
break;
@@ -216,8 +217,7 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
if (evictable_reg >= 0) {
TRACE("Found evictable register %s for live range %d\n",
RegisterName(free_reg), range->id());
- current_allocations(evictable_reg)
- ->EvictAndRescheduleConflicts(range, &scheduler());
+ EvictAndRescheduleConflicts(evictable_reg, range);
AssignRangeToRegister(evictable_reg, range);
return;
}
@@ -227,6 +227,21 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
}
+void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
+ const LiveRange* range) {
+ auto conflicts = current_allocations(reg_id)->GetConflicts(range);
+ for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
+ conflict = conflicts.RemoveCurrentAndGetNext()) {
+ DCHECK(conflict->HasRegisterAssigned());
+ CHECK(!conflict->IsFixed());
+ conflict->UnsetAssignedRegister();
+ UpdateWeightAtEviction(conflict);
+ scheduler().Schedule(conflict);
+ TRACE("Evicted range %d.\n", conflict->id());
+ }
+}
+
+
void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
size_t initial_range_count = data()->live_ranges().size();
for (size_t i = 0; i < initial_range_count; ++i) {
@@ -298,6 +313,22 @@ void GreedyAllocator::AllocateRegisters() {
}
+float GreedyAllocator::GetMaximumConflictingWeight(
+ unsigned reg_id, const LiveRange* range) const {
+ float ret = LiveRange::kInvalidWeight;
+
+ auto conflicts = current_allocations(reg_id)->GetConflicts(range);
+ for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
+ conflict = conflicts.GetNext()) {
+ DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight);
+ ret = Max(ret, conflict->weight());
+ if (ret == LiveRange::kMaxWeight) return ret;
+ }
+
+ return ret;
+}
+
+
void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
// The live range weight will be invalidated when ranges are created or split.
// Otherwise, it is consistently updated when the range is allocated or
diff --git a/deps/v8/src/compiler/greedy-allocator.h b/deps/v8/src/compiler/greedy-allocator.h
index 3ec180b2ba..c4e330eb97 100644
--- a/deps/v8/src/compiler/greedy-allocator.h
+++ b/deps/v8/src/compiler/greedy-allocator.h
@@ -62,10 +62,28 @@ class GreedyAllocator final : public RegisterAllocator {
void AllocateRegisters();
private:
+ static const float kAllocatedRangeMultiplier;
+
+ static void UpdateWeightAtAllocation(LiveRange* range) {
+ DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
+ range->set_weight(range->weight() * kAllocatedRangeMultiplier);
+ }
+
+
+ static void UpdateWeightAtEviction(LiveRange* range) {
+ DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
+ range->set_weight(range->weight() / kAllocatedRangeMultiplier);
+ }
+
AllocationScheduler& scheduler() { return scheduler_; }
CoalescedLiveRanges* current_allocations(unsigned i) {
return allocations_[i];
}
+
+ CoalescedLiveRanges* current_allocations(unsigned i) const {
+ return allocations_[i];
+ }
+
Zone* local_zone() const { return local_zone_; }
// Insert fixed ranges.
@@ -75,6 +93,13 @@ class GreedyAllocator final : public RegisterAllocator {
// TODO(mtrofin): groups.
void ScheduleAllocationCandidates();
+ void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
+ UpdateWeightAtAllocation(range);
+ current_allocations(reg_id)->AllocateRange(range);
+ }
+ // Evict and reschedule conflicts of a given range, at a given register.
+ void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);
+
// Find the optimal split for ranges defined by a memory operand, e.g.
// constants or function parameters passed on the stack.
void SplitAndSpillRangesDefinedByMemoryOperand();
@@ -92,6 +117,11 @@ class GreedyAllocator final : public RegisterAllocator {
// Calculate the new weight of a range that is about to be allocated.
float GetAllocatedRangeWeight(float candidate_weight);
+ // Returns kInvalidWeight if there are no conflicts, or the largest weight of
+ // a range conflicting with the given range, at the given register.
+ float GetMaximumConflictingWeight(unsigned reg_id,
+ const LiveRange* range) const;
+
// This is the extension point for splitting heuristics.
void SplitOrSpillBlockedRange(LiveRange* range);
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 4690a8cc05..4241a5e982 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -7,7 +7,9 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
#include "src/ia32/assembler-ia32.h"
+#include "src/ia32/frames-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
#include "src/scopes.h"
@@ -46,10 +48,10 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Operand(ToDoubleRegister(op));
}
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(
- AllocatedOperand::cast(op)->index(), frame(), extra);
- return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+ FrameOffset offset =
+ linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ return Operand(offset.from_stack_pointer() ? esp : ebp,
+ offset.offset() + extra);
}
Operand HighOperand(InstructionOperand* op) {
@@ -318,7 +320,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
- __ jmp(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
+ __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(reg);
}
break;
}
@@ -873,7 +876,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kIA32Push:
- if (HasImmediateInput(instr, 0)) {
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+ } else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
} else {
__ push(i.InputOperand(0));
@@ -1255,34 +1261,22 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
// Assemble a prologue similar the to cdecl calling convention.
__ push(ebp);
__ mov(ebp, esp);
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- int register_save_area_size = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- __ push(Register::from_code(i));
- register_save_area_size += kPointerSize;
- }
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
- }
} else if (descriptor->IsJSFunctionCall()) {
// TODO(turbofan): this prologue is redundant with OSR, but needed for
// code aging.
CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
} else if (needs_frame_) {
__ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
}
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1295,64 +1289,57 @@ void CodeGenerator::AssemblePrologue() {
osr_pc_offset_ = __ pc_offset();
// TODO(titzer): cannot address target function == local #-1
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
- stack_slots -= frame()->GetOsrStackSlotCount();
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (stack_shrink_slots > 0) {
+ __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
}
- if (stack_slots > 0) {
- // Allocate the stack slots used by this frame.
- __ sub(esp, Immediate(stack_slots * kPointerSize));
+ if (saves != 0) { // Save callee-saved registers.
+ DCHECK(!info()->is_osr());
+ int pushed = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ __ push(Register::from_code(i));
+ ++pushed;
+ }
+ frame()->AllocateSavedCalleeRegisterSlots(pushed);
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- if (stack_slots > 0) {
- __ add(esp, Immediate(stack_slots * kPointerSize));
- }
- // Restore registers.
- if (saves != 0) {
- for (int i = 0; i < Register::kNumRegisters; i++) {
- if (!((1 << i) & saves)) continue;
- __ pop(Register::from_code(i));
- }
- }
- __ pop(ebp); // Pop caller's frame pointer.
- __ ret(0);
- } else {
- // No saved registers.
- __ mov(esp, ebp); // Move stack pointer back to frame pointer.
- __ pop(ebp); // Pop caller's frame pointer.
- __ ret(0);
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ // Restore registers.
+ if (saves != 0) {
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ if (!((1 << i) & saves)) continue;
+ __ pop(Register::from_code(i));
}
+ }
+
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer.
+ __ pop(ebp); // Pop caller's frame pointer.
} else if (descriptor->IsJSFunctionCall() || needs_frame_) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ jmp(&return_label_);
+ return;
} else {
__ bind(&return_label_);
__ mov(esp, ebp); // Move stack pointer back to frame pointer.
__ pop(ebp); // Pop caller's frame pointer.
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : (info()->IsStub()
- ? info()->code_stub()->GetStackParameterCount()
- : 0);
- if (pop_count == 0) {
- __ ret(0);
- } else {
- __ Ret(pop_count * kPointerSize, ebx);
- }
}
- } else {
- __ ret(0);
}
+ size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ // Might need ecx for scratch if pop_size is too big.
+ DCHECK_EQ(0, descriptor->CalleeSavedRegisters() & ecx.bit());
+ __ Ret(static_cast<int>(pop_size), ecx);
}
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 105ca8287b..1ea8dd6201 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -844,21 +844,28 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* node = buffer.pushed_nodes[n]) {
+ if (Node* input = buffer.pushed_nodes[n]) {
int const slot = static_cast<int>(n);
- InstructionOperand value =
- g.CanBeImmediate(node) ? g.UseImmediate(node) : g.UseRegister(node);
+ InstructionOperand value = g.CanBeImmediate(input)
+ ? g.UseImmediate(input)
+ : g.UseRegister(input);
Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
} else {
// Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- // TODO(titzer): handle pushing double parameters.
+ for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ // Skip any alignment holes in pushed nodes.
+ if (input == nullptr) continue;
+ // TODO(titzer): IA32Push cannot handle stack->stack double moves
+ // because there is no way to encode fixed double slots.
InstructionOperand value =
- g.CanBeImmediate(node)
- ? g.UseImmediate(node)
- : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ g.CanBeImmediate(input)
+ ? g.UseImmediate(input)
+ : IsSupported(ATOM) ||
+ sequence()->IsFloat(GetVirtualRegister(input))
+ ? g.UseRegister(input)
+ : g.Use(input);
Emit(kIA32Push, g.NoOutput(), value);
}
}
@@ -948,12 +955,12 @@ void InstructionSelector::VisitTailCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, true);
// Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ for (Node* input : base::Reversed(buffer.pushed_nodes)) {
// TODO(titzer): Handle pushing double parameters.
InstructionOperand value =
- g.CanBeImmediate(node)
- ? g.UseImmediate(node)
- : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ g.CanBeImmediate(input)
+ ? g.UseImmediate(input)
+ : IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
Emit(kIA32Push, g.NoOutput(), value);
}
diff --git a/deps/v8/src/compiler/ia32/linkage-ia32.cc b/deps/v8/src/compiler/ia32/linkage-ia32.cc
deleted file mode 100644
index 930a86c69f..0000000000
--- a/deps/v8/src/compiler/ia32/linkage-ia32.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct IA32LinkageHelperTraits {
- static Register ReturnValueReg() { return eax; }
- static Register ReturnValue2Reg() { return edx; }
- static Register JSCallFunctionReg() { return edi; }
- static Register ContextReg() { return esi; }
- static Register RuntimeCallFunctionReg() { return ebx; }
- static Register RuntimeCallArgCountReg() { return eax; }
- static RegList CCalleeSaveRegisters() {
- return esi.bit() | edi.bit() | ebx.bit();
- }
- static RegList CCalleeSaveFPRegisters() { return 0; }
- static Register CRegisterParameter(int i) { return no_reg; }
- static int CRegisterParametersLength() { return 0; }
- static int CStackBackingStoreLength() { return 0; }
-};
-
-typedef LinkageHelper<IA32LinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
- int parameter_count,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Zone* zone, Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties, MachineType return_type) {
- return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties,
- return_type);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- const MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index b34a914efc..50a94342c9 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -208,30 +208,29 @@ class OperandGenerator {
UnallocatedOperand ToUnallocatedOperand(LinkageLocation location,
MachineType type,
int virtual_register) {
- if (location.location_ == LinkageLocation::ANY_REGISTER) {
+ if (location.IsAnyRegister()) {
// any machine register.
return UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
virtual_register);
}
- if (location.location_ < 0) {
+ if (location.IsCallerFrameSlot()) {
// a location on the caller frame.
return UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
- location.location_, virtual_register);
+ location.AsCallerFrameSlot(), virtual_register);
}
- if (location.location_ > LinkageLocation::ANY_REGISTER) {
+ if (location.IsCalleeFrameSlot()) {
// a spill location on this (callee) frame.
- return UnallocatedOperand(
- UnallocatedOperand::FIXED_SLOT,
- location.location_ - LinkageLocation::ANY_REGISTER - 1,
- virtual_register);
+ return UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
+ location.AsCalleeFrameSlot(), virtual_register);
}
// a fixed register.
- if (RepresentationOf(type) == kRepFloat64) {
+ MachineType rep = RepresentationOf(type);
+ if (rep == kRepFloat64 || rep == kRepFloat32) {
return UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
- location.location_, virtual_register);
+ location.AsRegister(), virtual_register);
}
return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- location.location_, virtual_register);
+ location.AsRegister(), virtual_register);
}
InstructionSelector* selector_;
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 813da4f132..0ae1c63fea 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -777,8 +777,6 @@ void InstructionSelector::VisitNode(Node* node) {
}
-#if V8_TURBOFAN_BACKEND
-
void InstructionSelector::VisitLoadStackPointer(Node* node) {
OperandGenerator g(this);
Emit(kArchStackPointer, g.DefineAsRegister(node));
@@ -827,10 +825,8 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
}
-#endif // V8_TURBOFAN_BACKEND
-
// 32 bit targets do not implement the following instructions.
-#if !V8_TURBOFAN_BACKEND_64
+#if V8_TARGET_ARCH_32_BIT
void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
@@ -907,7 +903,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
-#endif // V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
+#endif // V8_TARGET_ARCH_32_BIT
void InstructionSelector::VisitFinish(Node* node) {
@@ -997,9 +993,13 @@ void InstructionSelector::VisitGoto(BasicBlock* target) {
void InstructionSelector::VisitReturn(Node* value) {
DCHECK_NOT_NULL(value);
OperandGenerator g(this);
- Emit(kArchRet, g.NoOutput(),
- g.UseLocation(value, linkage()->GetReturnLocation(),
- linkage()->GetReturnType()));
+ if (linkage()->GetIncomingDescriptor()->ReturnCount() == 0) {
+ Emit(kArchRet, g.NoOutput());
+ } else {
+ Emit(kArchRet, g.NoOutput(),
+ g.UseLocation(value, linkage()->GetReturnLocation(),
+ linkage()->GetReturnType()));
+ }
}
@@ -1120,42 +1120,6 @@ void InstructionSelector::AddFrameStateInputs(
DCHECK(value_index == descriptor->GetSize());
}
-
-#if !V8_TURBOFAN_BACKEND
-
-#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
- void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
-MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
-#undef DECLARE_UNIMPLEMENTED_SELECTOR
-
-
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
- UNIMPLEMENTED();
-}
-
-
-void InstructionSelector::VisitTailCall(Node* node) { UNIMPLEMENTED(); }
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- UNIMPLEMENTED();
-}
-
-
-void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
- UNIMPLEMENTED();
-}
-
-
-// static
-MachineOperatorBuilder::Flags
-InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::Flag::kNoFlags;
-}
-
-#endif // !V8_TURBOFAN_BACKEND
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 83b45b39dd..9aebb9a17a 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -566,9 +566,12 @@ InstructionBlock* InstructionSequence::GetInstructionBlock(
int instruction_index) const {
DCHECK(instruction_blocks_->size() == block_starts_.size());
auto begin = block_starts_.begin();
- auto end = std::lower_bound(begin, block_starts_.end(), instruction_index,
- std::less_equal<int>());
- size_t index = std::distance(begin, end) - 1;
+ auto end = std::lower_bound(begin, block_starts_.end(), instruction_index);
+ // Post condition of std::lower_bound:
+ DCHECK(end == block_starts_.end() || *end >= instruction_index);
+ if (end == block_starts_.end() || *end > instruction_index) --end;
+ DCHECK(*end <= instruction_index);
+ size_t index = std::distance(begin, end);
auto block = instruction_blocks_->at(index);
DCHECK(block->code_start() <= instruction_index &&
instruction_index < block->code_end());
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index a87ef7dc9c..4f6a515f11 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -1023,7 +1023,6 @@ struct PrintableInstructionSequence;
// Represents architecture-specific generated code before, during, and after
// register allocation.
-// TODO(titzer): s/IsDouble/IsFloat64/
class InstructionSequence final : public ZoneObject {
public:
static InstructionBlocks* InstructionBlocksFor(Zone* zone,
@@ -1085,6 +1084,9 @@ class InstructionSequence final : public ZoneObject {
const_iterator begin() const { return instructions_.begin(); }
const_iterator end() const { return instructions_.end(); }
const InstructionDeque& instructions() const { return instructions_; }
+ int LastInstructionIndex() const {
+ return static_cast<int>(instructions().size()) - 1;
+ }
Instruction* InstructionAt(int index) const {
DCHECK(index >= 0);
diff --git a/deps/v8/src/compiler/interpreter-assembler.cc b/deps/v8/src/compiler/interpreter-assembler.cc
new file mode 100644
index 0000000000..47e014ba39
--- /dev/null
+++ b/deps/v8/src/compiler/interpreter-assembler.cc
@@ -0,0 +1,265 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/interpreter-assembler.h"
+
+#include <ostream>
+
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-type.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/schedule.h"
+#include "src/frames.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
+ interpreter::Bytecode bytecode)
+ : bytecode_(bytecode),
+ raw_assembler_(new RawMachineAssembler(
+ isolate, new (zone) Graph(zone),
+ Linkage::GetInterpreterDispatchDescriptor(zone), kMachPtr,
+ InstructionSelector::SupportedMachineOperatorFlags())),
+ end_node_(nullptr),
+ accumulator_(
+ raw_assembler_->Parameter(Linkage::kInterpreterAccumulatorParameter)),
+ code_generated_(false) {}
+
+
+InterpreterAssembler::~InterpreterAssembler() {}
+
+
+Handle<Code> InterpreterAssembler::GenerateCode() {
+ DCHECK(!code_generated_);
+
+ End();
+
+ Schedule* schedule = raw_assembler_->Export();
+ // TODO(rmcilroy): use a non-testing code generator.
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(
+ isolate(), raw_assembler_->call_descriptor(), graph(), schedule);
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_trace_ignition_codegen) {
+ OFStream os(stdout);
+ code->Disassemble(interpreter::Bytecodes::ToString(bytecode_), os);
+ os << std::flush;
+ }
+#endif
+
+ code_generated_ = true;
+ return code;
+}
+
+
+Node* InterpreterAssembler::GetAccumulator() {
+ return accumulator_;
+}
+
+
+void InterpreterAssembler::SetAccumulator(Node* value) {
+ accumulator_ = value;
+}
+
+
+Node* InterpreterAssembler::RegisterFileRawPointer() {
+ return raw_assembler_->Parameter(Linkage::kInterpreterRegisterFileParameter);
+}
+
+
+Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
+ return raw_assembler_->Parameter(Linkage::kInterpreterBytecodeArrayParameter);
+}
+
+
+Node* InterpreterAssembler::BytecodeOffset() {
+ return raw_assembler_->Parameter(
+ Linkage::kInterpreterBytecodeOffsetParameter);
+}
+
+
+Node* InterpreterAssembler::DispatchTableRawPointer() {
+ return raw_assembler_->Parameter(Linkage::kInterpreterDispatchTableParameter);
+}
+
+
+Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
+ return raw_assembler_->WordShl(index, Int32Constant(kPointerSizeLog2));
+}
+
+
+Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
+ return raw_assembler_->Load(kMachPtr, RegisterFileRawPointer(),
+ RegisterFrameOffset(reg_index));
+}
+
+
+Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
+ return raw_assembler_->Store(kMachPtr, RegisterFileRawPointer(),
+ RegisterFrameOffset(reg_index), value);
+}
+
+
+Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
+ DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ return raw_assembler_->Load(
+ kMachUint8, BytecodeArrayTaggedPointer(),
+ raw_assembler_->IntPtrAdd(BytecodeOffset(),
+ Int32Constant(1 + operand_index)));
+}
+
+
+Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
+ DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ Node* load = raw_assembler_->Load(
+ kMachInt8, BytecodeArrayTaggedPointer(),
+ raw_assembler_->IntPtrAdd(BytecodeOffset(),
+ Int32Constant(1 + operand_index)));
+ // Ensure that we sign extend to full pointer size
+ if (kPointerSize == 8) {
+ load = raw_assembler_->ChangeInt32ToInt64(load);
+ }
+ return load;
+}
+
+
+Node* InterpreterAssembler::BytecodeOperandImm8(int operand_index) {
+ DCHECK_EQ(interpreter::OperandType::kImm8,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperandSignExtended(operand_index);
+}
+
+
+Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
+ DCHECK_EQ(interpreter::OperandType::kReg,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperandSignExtended(operand_index);
+}
+
+
+Node* InterpreterAssembler::Int32Constant(int value) {
+ return raw_assembler_->Int32Constant(value);
+}
+
+
+Node* InterpreterAssembler::NumberConstant(double value) {
+ return raw_assembler_->NumberConstant(value);
+}
+
+
+Node* InterpreterAssembler::HeapConstant(Unique<HeapObject> object) {
+ return raw_assembler_->HeapConstant(object);
+}
+
+
+Node* InterpreterAssembler::SmiShiftBitsConstant() {
+ return Int32Constant(kSmiShiftSize + kSmiTagSize);
+}
+
+
+Node* InterpreterAssembler::SmiTag(Node* value) {
+ return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
+}
+
+
+Node* InterpreterAssembler::SmiUntag(Node* value) {
+ return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
+}
+
+
+void InterpreterAssembler::Return() {
+ Node* exit_trampoline_code_object =
+ HeapConstant(Unique<HeapObject>::CreateImmovable(
+ isolate()->builtins()->InterpreterExitTrampoline()));
+ // If the order of the parameters changes you need to change the call signature below.
+ STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
+ STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
+ STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
+ STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
+ STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
+ Node* tail_call = raw_assembler_->TailCallInterpreterDispatch(
+ call_descriptor(), exit_trampoline_code_object, GetAccumulator(),
+ RegisterFileRawPointer(), BytecodeOffset(), BytecodeArrayTaggedPointer(),
+ DispatchTableRawPointer());
+ // This should always be the end node.
+ SetEndInput(tail_call);
+}
+
+
+Node* InterpreterAssembler::Advance(int delta) {
+ return raw_assembler_->IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
+}
+
+
+void InterpreterAssembler::Dispatch() {
+ Node* new_bytecode_offset = Advance(interpreter::Bytecodes::Size(bytecode_));
+ Node* target_bytecode = raw_assembler_->Load(
+ kMachUint8, BytecodeArrayTaggedPointer(), new_bytecode_offset);
+
+ // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
+ // from code object on every dispatch.
+ Node* target_code_object = raw_assembler_->Load(
+ kMachPtr, DispatchTableRawPointer(),
+ raw_assembler_->Word32Shl(target_bytecode,
+ Int32Constant(kPointerSizeLog2)));
+
+ // If the order of the parameters changes you need to change the call signature below.
+ STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
+ STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
+ STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
+ STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
+ STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
+ Node* tail_call = raw_assembler_->TailCallInterpreterDispatch(
+ call_descriptor(), target_code_object, GetAccumulator(),
+ RegisterFileRawPointer(), new_bytecode_offset,
+ BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
+ // This should always be the end node.
+ SetEndInput(tail_call);
+}
+
+
+void InterpreterAssembler::SetEndInput(Node* input) {
+ DCHECK(!end_node_);
+ end_node_ = input;
+}
+
+
+void InterpreterAssembler::End() {
+ DCHECK(end_node_);
+ // TODO(rmcilroy): Support more than 1 end input.
+ Node* end = graph()->NewNode(raw_assembler_->common()->End(1), end_node_);
+ graph()->SetEnd(end);
+}
+
+
+// RawMachineAssembler delegate helpers:
+Isolate* InterpreterAssembler::isolate() { return raw_assembler_->isolate(); }
+
+
+Graph* InterpreterAssembler::graph() { return raw_assembler_->graph(); }
+
+
+CallDescriptor* InterpreterAssembler::call_descriptor() const {
+ return raw_assembler_->call_descriptor();
+}
+
+
+Schedule* InterpreterAssembler::schedule() {
+ return raw_assembler_->schedule();
+}
+
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/interpreter-assembler.h b/deps/v8/src/compiler/interpreter-assembler.h
new file mode 100644
index 0000000000..4662fc8042
--- /dev/null
+++ b/deps/v8/src/compiler/interpreter-assembler.h
@@ -0,0 +1,118 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INTERPRETER_ASSEMBLER_H_
+#define V8_COMPILER_INTERPRETER_ASSEMBLER_H_
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/allocation.h"
+#include "src/base/smart-pointers.h"
+#include "src/frames.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class Zone;
+
+namespace compiler {
+
+class CallDescriptor;
+class Graph;
+class Node;
+class Operator;
+class RawMachineAssembler;
+class Schedule;
+
+class InterpreterAssembler {
+ public:
+ InterpreterAssembler(Isolate* isolate, Zone* zone,
+ interpreter::Bytecode bytecode);
+ virtual ~InterpreterAssembler();
+
+ Handle<Code> GenerateCode();
+
+ // Returns the Imm8 immediate for bytecode operand |operand_index| in the
+ // current bytecode.
+ Node* BytecodeOperandImm8(int operand_index);
+ // Returns the register index for bytecode operand |operand_index| in the
+ // current bytecode.
+ Node* BytecodeOperandReg(int operand_index);
+
+ // Accumulator.
+ Node* GetAccumulator();
+ void SetAccumulator(Node* value);
+
+ // Loads from and stores to the interpreter register file.
+ Node* LoadRegister(Node* reg_index);
+ Node* StoreRegister(Node* value, Node* reg_index);
+
+ // Constants.
+ Node* Int32Constant(int value);
+ Node* NumberConstant(double value);
+ Node* HeapConstant(Unique<HeapObject> object);
+
+ // Tag and untag Smi values.
+ Node* SmiTag(Node* value);
+ Node* SmiUntag(Node* value);
+
+ // Returns from the function.
+ void Return();
+
+ // Dispatch to the bytecode.
+ void Dispatch();
+
+ protected:
+ // Close the graph.
+ void End();
+
+ // Protected helpers (for testing) which delegate to RawMachineAssembler.
+ CallDescriptor* call_descriptor() const;
+ Graph* graph();
+
+ private:
+ // Returns a raw pointer to start of the register file on the stack.
+ Node* RegisterFileRawPointer();
+ // Returns a tagged pointer to the current function's BytecodeArray object.
+ Node* BytecodeArrayTaggedPointer();
+ // Returns the offset from the BytecodeArrayPointer of the current bytecode.
+ Node* BytecodeOffset();
+ // Returns a pointer to first entry in the interpreter dispatch table.
+ Node* DispatchTableRawPointer();
+
+ // Returns the offset of register |index| relative to RegisterFilePointer().
+ Node* RegisterFrameOffset(Node* index);
+
+ Node* SmiShiftBitsConstant();
+ Node* BytecodeOperand(int operand_index);
+ Node* BytecodeOperandSignExtended(int operand_index);
+
+ // Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
+ // update BytecodeOffset() itself.
+ Node* Advance(int delta);
+
+ // Sets the end node of the graph.
+ void SetEndInput(Node* input);
+
+ // Private helpers which delegate to RawMachineAssembler.
+ Isolate* isolate();
+ Schedule* schedule();
+
+ interpreter::Bytecode bytecode_;
+ base::SmartPointer<RawMachineAssembler> raw_assembler_;
+ Node* end_node_;
+ Node* accumulator_;
+ bool code_generated_;
+
+ DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INTERPRETER_ASSEMBLER_H_
diff --git a/deps/v8/src/compiler/js-context-relaxation.cc b/deps/v8/src/compiler/js-context-relaxation.cc
new file mode 100644
index 0000000000..0ca3c0c9d3
--- /dev/null
+++ b/deps/v8/src/compiler/js-context-relaxation.cc
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/frame-states.h"
+#include "src/compiler/js-context-relaxation.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction JSContextRelaxation::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCallFunction:
+ case IrOpcode::kJSToNumber: {
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* outer_frame = frame_state;
+ Node* original_context = NodeProperties::GetContextInput(node);
+ Node* candidate_new_context = original_context;
+ do {
+ FrameStateInfo frame_state_info(
+ OpParameter<FrameStateInfo>(outer_frame->op()));
+ const FrameStateFunctionInfo* function_info =
+ frame_state_info.function_info();
+ if (function_info == nullptr ||
+ (function_info->context_calling_mode() ==
+ CALL_CHANGES_NATIVE_CONTEXT)) {
+ break;
+ }
+ candidate_new_context = outer_frame->InputAt(kFrameStateContextInput);
+ outer_frame = outer_frame->InputAt(kFrameStateOuterStateInput);
+ } while (outer_frame->opcode() == IrOpcode::kFrameState);
+
+ while (true) {
+ switch (candidate_new_context->opcode()) {
+ case IrOpcode::kParameter:
+ case IrOpcode::kJSCreateModuleContext:
+ case IrOpcode::kJSCreateScriptContext:
+ if (candidate_new_context != original_context) {
+ NodeProperties::ReplaceContextInput(node, candidate_new_context);
+ return Changed(node);
+ } else {
+ return NoChange();
+ }
+ case IrOpcode::kJSCreateCatchContext:
+ case IrOpcode::kJSCreateWithContext:
+ case IrOpcode::kJSCreateBlockContext:
+ candidate_new_context =
+ NodeProperties::GetContextInput(candidate_new_context);
+ break;
+ default:
+ return NoChange();
+ }
+ }
+ }
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-context-relaxation.h b/deps/v8/src/compiler/js-context-relaxation.h
new file mode 100644
index 0000000000..4320e92391
--- /dev/null
+++ b/deps/v8/src/compiler/js-context-relaxation.h
@@ -0,0 +1,32 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_CONTEXT_RELAXATION_H_
+#define V8_COMPILER_JS_CONTEXT_RELAXATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Ensures that operations that only need to access the native context use the
+// outer-most context rather than the specific context given by the AST graph
+// builder. This makes it possible to use these operations with context
+// specialization (e.g. for generating stubs) without forcing inner contexts to
+// be embedded in generated code thus causing leaks and potentially using the
+// wrong native context (i.e. stubs are shared between native contexts).
+class JSContextRelaxation final : public Reducer {
+ public:
+ JSContextRelaxation() {}
+ ~JSContextRelaxation() final {}
+
+ Reduction Reduce(Node* node) final;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_CONTEXT_RELAXATION_H_
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index e4d4d80f52..07746fa98b 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -17,8 +17,6 @@ namespace compiler {
Reduction JSContextSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
- case IrOpcode::kParameter:
- return ReduceParameter(node);
case IrOpcode::kJSLoadContext:
return ReduceJSLoadContext(node);
case IrOpcode::kJSStoreContext:
@@ -30,37 +28,43 @@ Reduction JSContextSpecialization::Reduce(Node* node) {
}
-Reduction JSContextSpecialization::ReduceParameter(Node* node) {
- DCHECK_EQ(IrOpcode::kParameter, node->opcode());
- Node* const start = NodeProperties::GetValueInput(node, 0);
- DCHECK_EQ(IrOpcode::kStart, start->opcode());
- int const index = ParameterIndexOf(node->op());
- // The context is always the last parameter to a JavaScript function, and
- // {Parameter} indices start at -1, so value outputs of {Start} look like
- // this: closure, receiver, param0, ..., paramN, context.
- if (index == start->op()->ValueOutputCount() - 2) {
- Handle<Context> context_constant;
- if (context().ToHandle(&context_constant)) {
- return Replace(jsgraph()->Constant(context_constant));
+MaybeHandle<Context> JSContextSpecialization::GetSpecializationContext(
+ Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadContext ||
+ node->opcode() == IrOpcode::kJSStoreContext);
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ switch (object->opcode()) {
+ case IrOpcode::kHeapConstant:
+ return Handle<Context>::cast(
+ OpParameter<Unique<HeapObject>>(object).handle());
+ case IrOpcode::kParameter: {
+ Node* const start = NodeProperties::GetValueInput(object, 0);
+ DCHECK_EQ(IrOpcode::kStart, start->opcode());
+ int const index = ParameterIndexOf(object->op());
+ // The context is always the last parameter to a JavaScript function, and
+ // {Parameter} indices start at -1, so value outputs of {Start} look like
+ // this: closure, receiver, param0, ..., paramN, context.
+ if (index == start->op()->ValueOutputCount() - 2) {
+ return context();
+ }
+ break;
}
+ default:
+ break;
}
- return NoChange();
+ return MaybeHandle<Context>();
}
Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
- HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
- // If the context is not constant, no reduction can occur.
- if (!m.HasValue()) {
- return NoChange();
- }
-
- const ContextAccess& access = ContextAccessOf(node->op());
+ // Get the specialization context from the node.
+ Handle<Context> context;
+ if (!GetSpecializationContext(node).ToHandle(&context)) return NoChange();
// Find the right parent context.
- Handle<Context> context = Handle<Context>::cast(m.Value().handle());
+ const ContextAccess& access = ContextAccessOf(node->op());
for (size_t i = access.depth(); i > 0; --i) {
context = handle(context->previous(), isolate());
}
@@ -100,21 +104,17 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
- HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
- // If the context is not constant, no reduction can occur.
- if (!m.HasValue()) {
- return NoChange();
- }
-
- const ContextAccess& access = ContextAccessOf(node->op());
+ // Get the specialization context from the node.
+ Handle<Context> context;
+ if (!GetSpecializationContext(node).ToHandle(&context)) return NoChange();
// The access does not have to look up a parent, nothing to fold.
+ const ContextAccess& access = ContextAccessOf(node->op());
if (access.depth() == 0) {
return NoChange();
}
// Find the right parent context.
- Handle<Context> context = Handle<Context>::cast(m.Value().handle());
for (size_t i = access.depth(); i > 0; --i) {
context = handle(context->previous(), isolate());
}
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index 2ede6b5e17..ef784fc442 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -27,10 +27,12 @@ class JSContextSpecialization final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
private:
- Reduction ReduceParameter(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
+ // Returns the {Context} to specialize {node} to (if any).
+ MaybeHandle<Context> GetSpecializationContext(Node* node);
+
Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-frame-specialization.h b/deps/v8/src/compiler/js-frame-specialization.h
index c6fc561c5c..90b3ca5e39 100644
--- a/deps/v8/src/compiler/js-frame-specialization.h
+++ b/deps/v8/src/compiler/js-frame-specialization.h
@@ -9,6 +9,10 @@
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class JavaScriptFrame;
+
namespace compiler {
// Forward declarations.
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index da42aba523..bec199e0e3 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -307,7 +307,9 @@ void JSGenericLowering::LowerJSToName(Node* node) {
void JSGenericLowering::LowerJSToObject(Node* node) {
- ReplaceWithBuiltinCall(node, Builtins::TO_OBJECT, 1);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Callable callable = CodeFactory::ToObject(isolate());
+ ReplaceWithStubCall(node, callable, flags);
}
@@ -325,7 +327,7 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
Callable callable = CodeFactory::LoadICInOptimizedCode(
- isolate(), p.contextual_mode(), p.language_mode(), UNINITIALIZED);
+ isolate(), NOT_INSIDE_TYPEOF, p.language_mode(), UNINITIALIZED);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
ReplaceWithStubCall(node, callable, flags);
@@ -334,12 +336,24 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- const LoadNamedParameters& p = LoadGlobalParametersOf(node->op());
- Callable callable = CodeFactory::LoadICInOptimizedCode(
- isolate(), p.contextual_mode(), SLOPPY, UNINITIALIZED);
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- ReplaceWithStubCall(node, callable, flags);
+ const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
+ if (p.slot_index() >= 0) {
+ Callable callable = CodeFactory::LoadGlobalViaContext(isolate(), 0);
+ Node* script_context = node->InputAt(0);
+ node->ReplaceInput(0, jsgraph()->Int32Constant(p.slot_index()));
+ node->ReplaceInput(1, script_context); // Set new context...
+ node->RemoveInput(2);
+ node->RemoveInput(2); // ...instead of old one.
+ ReplaceWithStubCall(node, callable, flags);
+
+ } else {
+ Callable callable = CodeFactory::LoadICInOptimizedCode(
+ isolate(), p.typeof_mode(), SLOPPY, UNINITIALIZED);
+ node->RemoveInput(0); // script context
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithStubCall(node, callable, flags);
+ }
}
@@ -347,10 +361,14 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const StorePropertyParameters& p = StorePropertyParametersOf(node->op());
LanguageMode language_mode = OpParameter<LanguageMode>(node);
+ // We have a special case where we do keyed stores but don't have a type
+ // feedback vector slot allocated to support it. In this case, install
+ // the megamorphic keyed store stub which needs neither vector nor slot.
+ bool use_vector_slot = FLAG_vector_stores && p.feedback().index() != -1;
Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), language_mode, UNINITIALIZED);
- if (FLAG_vector_stores) {
- DCHECK(p.feedback().index() != -1);
+ isolate(), language_mode,
+ (use_vector_slot || !FLAG_vector_stores) ? UNINITIALIZED : MEGAMORPHIC);
+ if (use_vector_slot) {
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
} else {
node->RemoveInput(3);
@@ -379,24 +397,42 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- const StoreNamedParameters& p = StoreGlobalParametersOf(node->op());
- Callable callable = CodeFactory::StoreIC(isolate(), p.language_mode());
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- if (FLAG_vector_stores) {
- DCHECK(p.feedback().index() != -1);
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
- } else {
+ const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
+ if (p.slot_index() >= 0) {
+ Callable callable =
+ CodeFactory::StoreGlobalViaContext(isolate(), 0, p.language_mode());
+ Node* script_context = node->InputAt(0);
+ Node* value = node->InputAt(2);
+ node->ReplaceInput(0, jsgraph()->Int32Constant(p.slot_index()));
+ node->ReplaceInput(1, value);
+ node->ReplaceInput(2, script_context); // Set new context...
node->RemoveInput(3);
+ node->RemoveInput(3); // ...instead of old one.
+ ReplaceWithStubCall(node, callable, flags);
+
+ } else {
+ Callable callable = CodeFactory::StoreICInOptimizedCode(
+ isolate(), p.language_mode(), UNINITIALIZED);
+ node->RemoveInput(0); // script context
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ if (FLAG_vector_stores) {
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3,
+ jsgraph()->SmiConstant(p.feedback().index()));
+ } else {
+ node->RemoveInput(3);
+ }
+ ReplaceWithStubCall(node, callable,
+ CallDescriptor::kPatchableCallSite | flags);
}
- ReplaceWithStubCall(node, callable,
- CallDescriptor::kPatchableCallSite | flags);
}
void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
LanguageMode language_mode = OpParameter<LanguageMode>(node);
- ReplaceWithBuiltinCall(node, Builtins::DELETE, 3);
- node->InsertInput(zone(), 4, jsgraph()->SmiConstant(language_mode));
+ ReplaceWithRuntimeCall(node, is_strict(language_mode)
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy);
}
@@ -455,8 +491,9 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
void JSGenericLowering::LowerJSLoadDynamicGlobal(Node* node) {
const DynamicGlobalAccess& access = DynamicGlobalAccessOf(node->op());
Runtime::FunctionId function_id =
- (access.mode() == CONTEXTUAL) ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
+ (access.typeof_mode() == NOT_INSIDE_TYPEOF)
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotNoReferenceError;
Node* projection = graph()->NewNode(common()->Projection(0), node);
NodeProperties::ReplaceUses(node, projection, node, node, node);
node->RemoveInput(NodeProperties::FirstFrameStateIndex(node) + 1);
@@ -508,17 +545,20 @@ void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
void JSGenericLowering::LowerJSCallConstruct(Node* node) {
int arity = OpParameter<int>(node);
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+ CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
CallDescriptor* desc =
- Linkage::GetStubCallDescriptor(isolate(), zone(), d, arity, flags);
+ Linkage::GetStubCallDescriptor(isolate(), zone(), d, arity - 1, flags);
Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
- Node* construct = NodeProperties::GetValueInput(node, 0);
+ Node* actual_construct = NodeProperties::GetValueInput(node, 0);
+ Node* original_construct = NodeProperties::GetValueInput(node, arity - 1);
+ node->RemoveInput(arity - 1); // Drop original constructor.
node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 1, jsgraph()->Int32Constant(arity - 1));
- node->InsertInput(zone(), 2, construct);
- node->InsertInput(zone(), 3, jsgraph()->UndefinedConstant());
+ node->InsertInput(zone(), 1, jsgraph()->Int32Constant(arity - 2));
+ node->InsertInput(zone(), 2, actual_construct);
+ node->InsertInput(zone(), 3, original_construct);
+ node->InsertInput(zone(), 4, jsgraph()->UndefinedConstant());
node->set_op(common()->Call(desc));
}
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 88d91718e5..a4f3e1d03e 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -13,7 +13,7 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/parser.h"
#include "src/rewriter.h"
#include "src/scopes.h"
@@ -214,7 +214,8 @@ Node* JSInliner::CreateArgumentsAdaptorFrameState(
const FrameStateFunctionInfo* state_info =
jsgraph_->common()->CreateFrameStateFunctionInfo(
FrameStateType::kArgumentsAdaptor,
- static_cast<int>(call->formal_arguments()) + 1, 0, shared_info);
+ static_cast<int>(call->formal_arguments()) + 1, 0, shared_info,
+ CALL_MAINTAINS_NATIVE_CONTEXT);
const Operator* op = jsgraph_->common()->FrameState(
BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
@@ -249,6 +250,14 @@ Reduction JSInliner::Reduce(Node* node) {
return NoChange();
}
+ if (function->shared()->HasDebugInfo()) {
+ // Function contains break points.
+ TRACE("Not inlining %s into %s because callee may contain break points\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
// Disallow cross native-context inlining for now. This means that all parts
// of the resulting code will operate on the same global object.
// This also prevents cross context leaks for asm.js code, where we could
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 8f04fc1bcd..e82ac205cc 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -94,6 +94,8 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceGetTypeFeedbackVector(node);
case Runtime::kInlineGetCallerJSFunction:
return ReduceGetCallerJSFunction(node);
+ case Runtime::kInlineToObject:
+ return ReduceToObject(node);
case Runtime::kInlineThrowNotDateError:
return ReduceThrowNotDateError(node);
case Runtime::kInlineCallFunction:
@@ -178,7 +180,7 @@ Reduction JSIntrinsicLowering::ReduceIncrementStatsCounter(Node* node) {
if (!m.HasValue() || !m.Value().handle()->IsString()) {
return ChangeToUndefined(node);
}
- SmartArrayPointer<char> name =
+ base::SmartArrayPointer<char> name =
Handle<String>::cast(m.Value().handle())->ToCString();
StatsCounter counter(jsgraph()->isolate(), name.get());
if (!counter.Enabled()) return ChangeToUndefined(node);
@@ -528,6 +530,12 @@ Reduction JSIntrinsicLowering::ReduceThrowNotDateError(Node* node) {
}
+Reduction JSIntrinsicLowering::ReduceToObject(Node* node) {
+ node->set_op(javascript()->ToObject());
+ return Changed(node);
+}
+
+
Reduction JSIntrinsicLowering::ReduceCallFunction(Node* node) {
CallRuntimeParameters params = OpParameter<CallRuntimeParameters>(node->op());
size_t arity = params.arity();
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 816defbf58..c14882c734 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -58,6 +58,7 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Reduction ReduceGetTypeFeedbackVector(Node* node);
Reduction ReduceGetCallerJSFunction(Node* node);
Reduction ReduceThrowNotDateError(Node* node);
+ Reduction ReduceToObject(Node* node);
Reduction ReduceCallFunction(Node* node);
Reduction Change(Node* node, const Operator* op);
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 1966724a86..6a5bdfd692 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -113,11 +113,11 @@ ContextAccess const& ContextAccessOf(Operator const* op) {
DynamicGlobalAccess::DynamicGlobalAccess(const Handle<String>& name,
uint32_t check_bitset,
const VectorSlotPair& feedback,
- ContextualMode mode)
+ TypeofMode typeof_mode)
: name_(name),
check_bitset_(check_bitset),
feedback_(feedback),
- mode_(mode) {
+ typeof_mode_(typeof_mode) {
DCHECK(check_bitset == kFullCheckRequired || check_bitset < 0x80000000U);
}
@@ -143,7 +143,7 @@ size_t hash_value(DynamicGlobalAccess const& access) {
std::ostream& operator<<(std::ostream& os, DynamicGlobalAccess const& access) {
return os << Brief(*access.name()) << ", " << access.check_bitset() << ", "
- << access.mode();
+ << access.typeof_mode();
}
@@ -198,7 +198,6 @@ bool operator==(LoadNamedParameters const& lhs,
LoadNamedParameters const& rhs) {
return lhs.name() == rhs.name() &&
lhs.language_mode() == rhs.language_mode() &&
- lhs.contextual_mode() == rhs.contextual_mode() &&
lhs.feedback() == rhs.feedback();
}
@@ -210,14 +209,12 @@ bool operator!=(LoadNamedParameters const& lhs,
size_t hash_value(LoadNamedParameters const& p) {
- return base::hash_combine(p.name(), p.language_mode(), p.contextual_mode(),
- p.feedback());
+ return base::hash_combine(p.name(), p.language_mode(), p.feedback());
}
std::ostream& operator<<(std::ostream& os, LoadNamedParameters const& p) {
- return os << Brief(*p.name().handle()) << ", " << p.language_mode() << ", "
- << p.contextual_mode();
+ return os << Brief(*p.name().handle()) << ", " << p.language_mode();
}
@@ -256,9 +253,66 @@ const LoadNamedParameters& LoadNamedParametersOf(const Operator* op) {
}
-const LoadNamedParameters& LoadGlobalParametersOf(const Operator* op) {
+bool operator==(LoadGlobalParameters const& lhs,
+ LoadGlobalParameters const& rhs) {
+ return lhs.name() == rhs.name() && lhs.feedback() == rhs.feedback() &&
+ lhs.typeof_mode() == rhs.typeof_mode() &&
+ lhs.slot_index() == rhs.slot_index();
+}
+
+
+bool operator!=(LoadGlobalParameters const& lhs,
+ LoadGlobalParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(LoadGlobalParameters const& p) {
+ return base::hash_combine(p.name(), p.typeof_mode(), p.slot_index());
+}
+
+
+std::ostream& operator<<(std::ostream& os, LoadGlobalParameters const& p) {
+ return os << Brief(*p.name().handle()) << ", " << p.typeof_mode()
+ << ", slot: " << p.slot_index();
+}
+
+
+const LoadGlobalParameters& LoadGlobalParametersOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kJSLoadGlobal, op->opcode());
- return OpParameter<LoadNamedParameters>(op);
+ return OpParameter<LoadGlobalParameters>(op);
+}
+
+
+bool operator==(StoreGlobalParameters const& lhs,
+ StoreGlobalParameters const& rhs) {
+ return lhs.language_mode() == rhs.language_mode() &&
+ lhs.name() == rhs.name() && lhs.feedback() == rhs.feedback() &&
+ lhs.slot_index() == rhs.slot_index();
+}
+
+
+bool operator!=(StoreGlobalParameters const& lhs,
+ StoreGlobalParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(StoreGlobalParameters const& p) {
+ return base::hash_combine(p.language_mode(), p.name(), p.feedback(),
+ p.slot_index());
+}
+
+
+std::ostream& operator<<(std::ostream& os, StoreGlobalParameters const& p) {
+ return os << p.language_mode() << ", " << Brief(*p.name().handle())
+ << ", slot: " << p.slot_index();
+}
+
+
+const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSStoreGlobal, op->opcode());
+ return OpParameter<StoreGlobalParameters>(op);
}
@@ -291,12 +345,6 @@ const StoreNamedParameters& StoreNamedParametersOf(const Operator* op) {
}
-const StoreNamedParameters& StoreGlobalParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSStoreGlobal, op->opcode());
- return OpParameter<StoreNamedParameters>(op);
-}
-
-
bool operator==(StorePropertyParameters const& lhs,
StorePropertyParameters const& rhs) {
return lhs.language_mode() == rhs.language_mode() &&
@@ -464,7 +512,7 @@ CACHED_OP_LIST(CACHED)
return &cache_.k##Name##StrictOperator; \
case STRONG: \
return &cache_.k##Name##StrongOperator; \
- case STRONG_BIT: \
+ default: \
break; /* %*!%^$#@ */ \
} \
UNREACHABLE(); \
@@ -514,7 +562,7 @@ const Operator* JSOperatorBuilder::CallConstruct(int arguments) {
const Operator* JSOperatorBuilder::LoadNamed(const Unique<Name>& name,
const VectorSlotPair& feedback,
LanguageMode language_mode) {
- LoadNamedParameters parameters(name, feedback, language_mode, NOT_CONTEXTUAL);
+ LoadNamedParameters parameters(name, feedback, language_mode);
return new (zone()) Operator1<LoadNamedParameters>( // --
IrOpcode::kJSLoadNamed, Operator::kNoProperties, // opcode
"JSLoadNamed", // name
@@ -568,24 +616,26 @@ const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
const Operator* JSOperatorBuilder::LoadGlobal(const Unique<Name>& name,
const VectorSlotPair& feedback,
- ContextualMode contextual_mode) {
- LoadNamedParameters parameters(name, feedback, SLOPPY, contextual_mode);
- return new (zone()) Operator1<LoadNamedParameters>( // --
+ TypeofMode typeof_mode,
+ int slot_index) {
+ LoadGlobalParameters parameters(name, feedback, typeof_mode, slot_index);
+ return new (zone()) Operator1<LoadGlobalParameters>( // --
IrOpcode::kJSLoadGlobal, Operator::kNoProperties, // opcode
"JSLoadGlobal", // name
- 2, 1, 1, 1, 1, 2, // counts
+ 3, 1, 1, 1, 1, 2, // counts
parameters); // parameter
}
const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode,
const Unique<Name>& name,
- const VectorSlotPair& feedback) {
- StoreNamedParameters parameters(language_mode, feedback, name);
- return new (zone()) Operator1<StoreNamedParameters>( // --
+ const VectorSlotPair& feedback,
+ int slot_index) {
+ StoreGlobalParameters parameters(language_mode, feedback, name, slot_index);
+ return new (zone()) Operator1<StoreGlobalParameters>( // --
IrOpcode::kJSStoreGlobal, Operator::kNoProperties, // opcode
"JSStoreGlobal", // name
- 3, 1, 1, 0, 1, 2, // counts
+ 4, 1, 1, 0, 1, 2, // counts
parameters); // parameter
}
@@ -615,8 +665,8 @@ const Operator* JSOperatorBuilder::StoreContext(size_t depth, size_t index) {
const Operator* JSOperatorBuilder::LoadDynamicGlobal(
const Handle<String>& name, uint32_t check_bitset,
- const VectorSlotPair& feedback, ContextualMode mode) {
- DynamicGlobalAccess access(name, check_bitset, feedback, mode);
+ const VectorSlotPair& feedback, TypeofMode typeof_mode) {
+ DynamicGlobalAccess access(name, check_bitset, feedback, typeof_mode);
return new (zone()) Operator1<DynamicGlobalAccess>( // --
IrOpcode::kJSLoadDynamicGlobal, Operator::kNoProperties, // opcode
"JSLoadDynamicGlobal", // name
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index d70c8e2096..5afbfdf6fa 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -158,12 +158,12 @@ ContextAccess const& ContextAccessOf(Operator const*);
class DynamicGlobalAccess final {
public:
DynamicGlobalAccess(const Handle<String>& name, uint32_t check_bitset,
- const VectorSlotPair& feedback, ContextualMode mode);
+ const VectorSlotPair& feedback, TypeofMode typeof_mode);
const Handle<String>& name() const { return name_; }
uint32_t check_bitset() const { return check_bitset_; }
const VectorSlotPair& feedback() const { return feedback_; }
- ContextualMode mode() const { return mode_; }
+ TypeofMode typeof_mode() const { return typeof_mode_; }
// Indicates that an inline check is disabled.
bool RequiresFullCheck() const {
@@ -180,7 +180,7 @@ class DynamicGlobalAccess final {
const Handle<String> name_;
const uint32_t check_bitset_;
const VectorSlotPair feedback_;
- const ContextualMode mode_;
+ const TypeofMode typeof_mode_;
};
size_t hash_value(DynamicGlobalAccess const&);
@@ -233,20 +233,15 @@ DynamicContextAccess const& DynamicContextAccessOf(Operator const*);
// Defines the property being loaded from an object by a named load. This is
-// used as a parameter by JSLoadNamed and JSLoadGlobal operators.
+// used as a parameter by JSLoadNamed operators.
class LoadNamedParameters final {
public:
LoadNamedParameters(const Unique<Name>& name, const VectorSlotPair& feedback,
- LanguageMode language_mode,
- ContextualMode contextual_mode)
- : name_(name),
- feedback_(feedback),
- language_mode_(language_mode),
- contextual_mode_(contextual_mode) {}
+ LanguageMode language_mode)
+ : name_(name), feedback_(feedback), language_mode_(language_mode) {}
const Unique<Name>& name() const { return name_; }
LanguageMode language_mode() const { return language_mode_; }
- ContextualMode contextual_mode() const { return contextual_mode_; }
const VectorSlotPair& feedback() const { return feedback_; }
@@ -254,7 +249,6 @@ class LoadNamedParameters final {
const Unique<Name> name_;
const VectorSlotPair feedback_;
const LanguageMode language_mode_;
- const ContextualMode contextual_mode_;
};
bool operator==(LoadNamedParameters const&, LoadNamedParameters const&);
@@ -266,7 +260,74 @@ std::ostream& operator<<(std::ostream&, LoadNamedParameters const&);
const LoadNamedParameters& LoadNamedParametersOf(const Operator* op);
-const LoadNamedParameters& LoadGlobalParametersOf(const Operator* op);
+
+// Defines the property being loaded from an object by a named load. This is
+// used as a parameter by JSLoadGlobal operator.
+class LoadGlobalParameters final {
+ public:
+ LoadGlobalParameters(const Unique<Name>& name, const VectorSlotPair& feedback,
+ TypeofMode typeof_mode, int slot_index)
+ : name_(name),
+ feedback_(feedback),
+ typeof_mode_(typeof_mode),
+ slot_index_(slot_index) {}
+
+ const Unique<Name>& name() const { return name_; }
+ TypeofMode typeof_mode() const { return typeof_mode_; }
+
+ const VectorSlotPair& feedback() const { return feedback_; }
+
+ int slot_index() const { return slot_index_; }
+
+ private:
+ const Unique<Name> name_;
+ const VectorSlotPair feedback_;
+ const TypeofMode typeof_mode_;
+ const int slot_index_;
+};
+
+bool operator==(LoadGlobalParameters const&, LoadGlobalParameters const&);
+bool operator!=(LoadGlobalParameters const&, LoadGlobalParameters const&);
+
+size_t hash_value(LoadGlobalParameters const&);
+
+std::ostream& operator<<(std::ostream&, LoadGlobalParameters const&);
+
+const LoadGlobalParameters& LoadGlobalParametersOf(const Operator* op);
+
+
+// Defines the property being stored to an object by a named store. This is
+// used as a parameter by JSStoreGlobal operator.
+class StoreGlobalParameters final {
+ public:
+ StoreGlobalParameters(LanguageMode language_mode,
+ const VectorSlotPair& feedback,
+ const Unique<Name>& name, int slot_index)
+ : language_mode_(language_mode),
+ name_(name),
+ feedback_(feedback),
+ slot_index_(slot_index) {}
+
+ LanguageMode language_mode() const { return language_mode_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+ const Unique<Name>& name() const { return name_; }
+ int slot_index() const { return slot_index_; }
+
+ private:
+ const LanguageMode language_mode_;
+ const Unique<Name> name_;
+ const VectorSlotPair feedback_;
+ int slot_index_;
+};
+
+bool operator==(StoreGlobalParameters const&, StoreGlobalParameters const&);
+bool operator!=(StoreGlobalParameters const&, StoreGlobalParameters const&);
+
+size_t hash_value(StoreGlobalParameters const&);
+
+std::ostream& operator<<(std::ostream&, StoreGlobalParameters const&);
+
+const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op);
// Defines the property being loaded from an object. This is
@@ -297,7 +358,7 @@ const LoadPropertyParameters& LoadPropertyParametersOf(const Operator* op);
// Defines the property being stored to an object by a named store. This is
-// used as a parameter by JSStoreNamed and JSStoreGlobal operators.
+// used as a parameter by JSStoreNamed operator.
class StoreNamedParameters final {
public:
StoreNamedParameters(LanguageMode language_mode,
@@ -323,8 +384,6 @@ std::ostream& operator<<(std::ostream&, StoreNamedParameters const&);
const StoreNamedParameters& StoreNamedParametersOf(const Operator* op);
-const StoreNamedParameters& StoreGlobalParametersOf(const Operator* op);
-
// Defines the property being stored to an object. This is used as a parameter
// by JSStoreProperty operators.
@@ -445,10 +504,12 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* LoadGlobal(const Unique<Name>& name,
const VectorSlotPair& feedback,
- ContextualMode contextual_mode = NOT_CONTEXTUAL);
+ TypeofMode typeof_mode = NOT_INSIDE_TYPEOF,
+ int slot_index = -1);
const Operator* StoreGlobal(LanguageMode language_mode,
const Unique<Name>& name,
- const VectorSlotPair& feedback);
+ const VectorSlotPair& feedback,
+ int slot_index = -1);
const Operator* LoadContext(size_t depth, size_t index, bool immutable);
const Operator* StoreContext(size_t depth, size_t index);
@@ -456,7 +517,7 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* LoadDynamicGlobal(const Handle<String>& name,
uint32_t check_bitset,
const VectorSlotPair& feedback,
- ContextualMode mode);
+ TypeofMode typeof_mode);
const Operator* LoadDynamicContext(const Handle<String>& name,
uint32_t check_bitset, size_t depth,
size_t index);
diff --git a/deps/v8/src/compiler/js-type-feedback-lowering.cc b/deps/v8/src/compiler/js-type-feedback-lowering.cc
new file mode 100644
index 0000000000..2522a7af07
--- /dev/null
+++ b/deps/v8/src/compiler/js-type-feedback-lowering.cc
@@ -0,0 +1,118 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-type-feedback-lowering.h"
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+JSTypeFeedbackLowering::JSTypeFeedbackLowering(Editor* editor, Flags flags,
+ JSGraph* jsgraph)
+ : AdvancedReducer(editor),
+ flags_(flags),
+ jsgraph_(jsgraph),
+ simplified_(graph()->zone()) {}
+
+
+Reduction JSTypeFeedbackLowering::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSLoadNamed:
+ return ReduceJSLoadNamed(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction JSTypeFeedbackLowering::ReduceJSLoadNamed(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Type* receiver_type = NodeProperties::GetBounds(receiver).upper;
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ // We need to make optimistic assumptions to continue.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ LoadNamedParameters const& p = LoadNamedParametersOf(node->op());
+ Handle<TypeFeedbackVector> vector;
+ if (!p.feedback().vector().ToHandle(&vector)) return NoChange();
+ if (p.name().handle().is_identical_to(factory()->length_string())) {
+ LoadICNexus nexus(vector, p.feedback().slot());
+ MapHandleList maps;
+ if (nexus.ExtractMaps(&maps) > 0) {
+ for (Handle<Map> map : maps) {
+ if (map->instance_type() >= FIRST_NONSTRING_TYPE) return NoChange();
+ }
+ // Optimistic optimization for "length" property of strings.
+ if (receiver_type->Maybe(Type::TaggedSigned())) {
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
+ effect, if_true);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfFalse(), branch);
+ }
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* receiver_instance_type = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ receiver_map, effect, control);
+ Node* check =
+ graph()->NewNode(machine()->Uint32LessThan(), receiver_instance_type,
+ jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
+ effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+ Node* value = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForStringLength(graph()->zone())),
+ receiver, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
+ return NoChange();
+}
+
+
+Factory* JSTypeFeedbackLowering::factory() const {
+ return isolate()->factory();
+}
+
+
+CommonOperatorBuilder* JSTypeFeedbackLowering::common() const {
+ return jsgraph()->common();
+}
+
+
+Graph* JSTypeFeedbackLowering::graph() const { return jsgraph()->graph(); }
+
+
+Isolate* JSTypeFeedbackLowering::isolate() const {
+ return jsgraph()->isolate();
+}
+
+
+MachineOperatorBuilder* JSTypeFeedbackLowering::machine() const {
+ return jsgraph()->machine();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-type-feedback-lowering.h b/deps/v8/src/compiler/js-type-feedback-lowering.h
new file mode 100644
index 0000000000..db0fbdd626
--- /dev/null
+++ b/deps/v8/src/compiler/js-type-feedback-lowering.h
@@ -0,0 +1,66 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_TYPE_FEEDBACK_LOWERING_H_
+#define V8_COMPILER_JS_TYPE_FEEDBACK_LOWERING_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class Factory;
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class MachineOperatorBuilder;
+
+
+// Lowers JS-level operators to simplified operators based on type feedback.
+class JSTypeFeedbackLowering final : public AdvancedReducer {
+ public:
+ // Various configuration flags to control the operation of this lowering.
+ enum Flag {
+ kNoFlags = 0,
+ kDeoptimizationEnabled = 1 << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ JSTypeFeedbackLowering(Editor* editor, Flags flags, JSGraph* jsgraph);
+ ~JSTypeFeedbackLowering() final {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceJSLoadNamed(Node* node);
+
+ Factory* factory() const;
+ Flags flags() const { return flags_; }
+ Graph* graph() const;
+ Isolate* isolate() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ CommonOperatorBuilder* common() const;
+ MachineOperatorBuilder* machine() const;
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ Flags const flags_;
+ JSGraph* const jsgraph_;
+ SimplifiedOperatorBuilder simplified_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSTypeFeedbackLowering);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(JSTypeFeedbackLowering::Flags)
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_TYPE_FEEDBACK_LOWERING_H_
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 051009dd6e..628e7e07b6 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -797,6 +797,27 @@ Reduction JSTypedLowering::ReduceJSLoadGlobal(Node* node) {
}
+Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Type* receiver_type = NodeProperties::GetBounds(receiver).upper;
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Handle<Name> name = LoadNamedParametersOf(node->op()).name().handle();
+ // Optimize "length" property of strings.
+ if (name.is_identical_to(factory()->length_string()) &&
+ receiver_type->Is(Type::String())) {
+ Node* value = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForStringLength(graph()->zone())),
+ receiver, effect, control);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+
Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
Node* key = NodeProperties::GetValueInput(node, 1);
Node* base = NodeProperties::GetValueInput(node, 0);
@@ -811,11 +832,10 @@ Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
size_t const k = ElementSizeLog2Of(access.machine_type());
double const byte_length = array->byte_length()->Number();
CHECK_LT(k, arraysize(shifted_int32_ranges_));
- if (IsExternalArrayElementsKind(array->map()->elements_kind()) &&
- key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
+ if (key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
// JSLoadProperty(typed-array, int32)
- Handle<ExternalArray> elements =
- Handle<ExternalArray>::cast(handle(array->elements()));
+ Handle<FixedTypedArrayBase> elements =
+ Handle<FixedTypedArrayBase>::cast(handle(array->elements()));
Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
Node* length = jsgraph()->Constant(byte_length);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -858,12 +878,11 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
size_t const k = ElementSizeLog2Of(access.machine_type());
double const byte_length = array->byte_length()->Number();
CHECK_LT(k, arraysize(shifted_int32_ranges_));
- if (IsExternalArrayElementsKind(array->map()->elements_kind()) &&
- access.external_array_type() != kExternalUint8ClampedArray &&
+ if (access.external_array_type() != kExternalUint8ClampedArray &&
key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
// JSLoadProperty(typed-array, int32)
- Handle<ExternalArray> elements =
- Handle<ExternalArray>::cast(handle(array->elements()));
+ Handle<FixedTypedArrayBase> elements =
+ Handle<FixedTypedArrayBase>::cast(handle(array->elements()));
Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
Node* length = jsgraph()->Constant(byte_length);
Node* context = NodeProperties::GetContextInput(node);
@@ -1002,14 +1021,14 @@ Reduction JSTypedLowering::ReduceJSLoadDynamicGlobal(Node* node) {
javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true), context,
context, effect);
Node* fast = graph()->NewNode(
- javascript()->LoadGlobal(name, access.feedback(), access.mode()), global,
- vector, context, state1, state2, global, check_true);
+ javascript()->LoadGlobal(name, access.feedback(), access.typeof_mode()),
+ context, global, vector, context, state1, state2, global, check_true);
// Slow case, because variable potentially shadowed. Perform dynamic lookup.
uint32_t check_bitset = DynamicGlobalAccess::kFullCheckRequired;
Node* slow = graph()->NewNode(
javascript()->LoadDynamicGlobal(access.name(), check_bitset,
- access.feedback(), access.mode()),
+ access.feedback(), access.typeof_mode()),
vector, context, context, state1, state2, effect, check_false);
// Replace value, effect and control uses accordingly.
@@ -1621,6 +1640,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSToString(node);
case IrOpcode::kJSLoadGlobal:
return ReduceJSLoadGlobal(node);
+ case IrOpcode::kJSLoadNamed:
+ return ReduceJSLoadNamed(node);
case IrOpcode::kJSLoadProperty:
return ReduceJSLoadProperty(node);
case IrOpcode::kJSStoreProperty:
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 8252093d15..920f644f28 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -42,6 +42,7 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSMultiply(Node* node);
Reduction ReduceJSComparison(Node* node);
Reduction ReduceJSLoadGlobal(Node* node);
+ Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
Reduction ReduceJSStoreProperty(Node* node);
Reduction ReduceJSLoadContext(Node* node);
diff --git a/deps/v8/src/compiler/linkage-impl.h b/deps/v8/src/compiler/linkage-impl.h
deleted file mode 100644
index 27b0235b97..0000000000
--- a/deps/v8/src/compiler/linkage-impl.h
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_LINKAGE_IMPL_H_
-#define V8_COMPILER_LINKAGE_IMPL_H_
-
-#include "src/code-stubs.h"
-#include "src/compiler/osr.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// TODO(titzer): replace uses of int with size_t in LinkageHelper.
-template <typename LinkageTraits>
-class LinkageHelper {
- public:
- static const RegList kNoCalleeSaved = 0;
-
- static void AddReturnLocations(LocationSignature::Builder* locations) {
- DCHECK(locations->return_count_ <= 2);
- if (locations->return_count_ > 0) {
- locations->AddReturn(regloc(LinkageTraits::ReturnValueReg()));
- }
- if (locations->return_count_ > 1) {
- locations->AddReturn(regloc(LinkageTraits::ReturnValue2Reg()));
- }
- }
-
- // TODO(turbofan): cache call descriptors for JSFunction calls.
- static CallDescriptor* GetJSCallDescriptor(Zone* zone, bool is_osr,
- int js_parameter_count,
- CallDescriptor::Flags flags) {
- const size_t return_count = 1;
- const size_t context_count = 1;
- const size_t parameter_count = js_parameter_count + context_count;
-
- LocationSignature::Builder locations(zone, return_count, parameter_count);
- MachineSignature::Builder types(zone, return_count, parameter_count);
-
- // Add returns.
- AddReturnLocations(&locations);
- for (size_t i = 0; i < return_count; i++) {
- types.AddReturn(kMachAnyTagged);
- }
-
- // All parameters to JS calls go on the stack.
- for (int i = 0; i < js_parameter_count; i++) {
- int spill_slot_index = i - js_parameter_count;
- locations.AddParam(stackloc(spill_slot_index));
- types.AddParam(kMachAnyTagged);
- }
- // Add context.
- locations.AddParam(regloc(LinkageTraits::ContextReg()));
- types.AddParam(kMachAnyTagged);
-
- // The target for JS function calls is the JSFunction object.
- MachineType target_type = kMachAnyTagged;
- // TODO(titzer): When entering into an OSR function from unoptimized code,
- // the JSFunction is not in a register, but it is on the stack in an
- // unaddressable spill slot. We hack this in the OSR prologue. Fix.
- LinkageLocation target_loc = regloc(LinkageTraits::JSCallFunctionReg());
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallJSFunction, // kind
- target_type, // target MachineType
- target_loc, // target location
- types.Build(), // machine_sig
- locations.Build(), // location_sig
- js_parameter_count, // js_parameter_count
- Operator::kNoProperties, // properties
- kNoCalleeSaved, // callee-saved
- kNoCalleeSaved, // callee-saved fp
- flags, // flags
- "js-call");
- }
-
-
- // TODO(turbofan): cache call descriptors for runtime calls.
- static CallDescriptor* GetRuntimeCallDescriptor(
- Zone* zone, Runtime::FunctionId function_id, int js_parameter_count,
- Operator::Properties properties) {
- const size_t function_count = 1;
- const size_t num_args_count = 1;
- const size_t context_count = 1;
- const size_t parameter_count = function_count +
- static_cast<size_t>(js_parameter_count) +
- num_args_count + context_count;
-
- const Runtime::Function* function = Runtime::FunctionForId(function_id);
- const size_t return_count = static_cast<size_t>(function->result_size);
-
- LocationSignature::Builder locations(zone, return_count, parameter_count);
- MachineSignature::Builder types(zone, return_count, parameter_count);
-
- // Add returns.
- AddReturnLocations(&locations);
- for (size_t i = 0; i < return_count; i++) {
- types.AddReturn(kMachAnyTagged);
- }
-
- // All parameters to the runtime call go on the stack.
- for (int i = 0; i < js_parameter_count; i++) {
- locations.AddParam(stackloc(i - js_parameter_count));
- types.AddParam(kMachAnyTagged);
- }
- // Add runtime function itself.
- locations.AddParam(regloc(LinkageTraits::RuntimeCallFunctionReg()));
- types.AddParam(kMachAnyTagged);
-
- // Add runtime call argument count.
- locations.AddParam(regloc(LinkageTraits::RuntimeCallArgCountReg()));
- types.AddParam(kMachPtr);
-
- // Add context.
- locations.AddParam(regloc(LinkageTraits::ContextReg()));
- types.AddParam(kMachAnyTagged);
-
- CallDescriptor::Flags flags = Linkage::FrameStateInputCount(function_id) > 0
- ? CallDescriptor::kNeedsFrameState
- : CallDescriptor::kNoFlags;
-
- // The target for runtime calls is a code object.
- MachineType target_type = kMachAnyTagged;
- LinkageLocation target_loc = LinkageLocation::AnyRegister();
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallCodeObject, // kind
- target_type, // target MachineType
- target_loc, // target location
- types.Build(), // machine_sig
- locations.Build(), // location_sig
- js_parameter_count, // js_parameter_count
- properties, // properties
- kNoCalleeSaved, // callee-saved
- kNoCalleeSaved, // callee-saved fp
- flags, // flags
- function->name); // debug name
- }
-
-
- // TODO(all): Add support for return representations/locations to
- // CallInterfaceDescriptor.
- // TODO(turbofan): cache call descriptors for code stub calls.
- static CallDescriptor* GetStubCallDescriptor(
- Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties, MachineType return_type) {
- const int register_parameter_count = descriptor.GetRegisterParameterCount();
- const int js_parameter_count =
- register_parameter_count + stack_parameter_count;
- const int context_count = 1;
- const size_t return_count = 1;
- const size_t parameter_count =
- static_cast<size_t>(js_parameter_count + context_count);
-
- LocationSignature::Builder locations(zone, return_count, parameter_count);
- MachineSignature::Builder types(zone, return_count, parameter_count);
-
- // Add return location.
- AddReturnLocations(&locations);
- types.AddReturn(return_type);
-
- // Add parameters in registers and on the stack.
- for (int i = 0; i < js_parameter_count; i++) {
- if (i < register_parameter_count) {
- // The first parameters go in registers.
- Register reg = descriptor.GetRegisterParameter(i);
- Representation rep =
- RepresentationFromType(descriptor.GetParameterType(i));
- locations.AddParam(regloc(reg));
- types.AddParam(reptyp(rep));
- } else {
- // The rest of the parameters go on the stack.
- int stack_slot = i - register_parameter_count - stack_parameter_count;
- locations.AddParam(stackloc(stack_slot));
- types.AddParam(kMachAnyTagged);
- }
- }
- // Add context.
- locations.AddParam(regloc(LinkageTraits::ContextReg()));
- types.AddParam(kMachAnyTagged);
-
- // The target for stub calls is a code object.
- MachineType target_type = kMachAnyTagged;
- LinkageLocation target_loc = LinkageLocation::AnyRegister();
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallCodeObject, // kind
- target_type, // target MachineType
- target_loc, // target location
- types.Build(), // machine_sig
- locations.Build(), // location_sig
- js_parameter_count, // js_parameter_count
- properties, // properties
- kNoCalleeSaved, // callee-saved registers
- kNoCalleeSaved, // callee-saved fp
- flags, // flags
- descriptor.DebugName(isolate));
- }
-
- static CallDescriptor* GetSimplifiedCDescriptor(
- Zone* zone, const MachineSignature* msig) {
- LocationSignature::Builder locations(zone, msig->return_count(),
- msig->parameter_count());
- // Add return location(s).
- AddReturnLocations(&locations);
-
- // Add register and/or stack parameter(s).
- const int parameter_count = static_cast<int>(msig->parameter_count());
- int stack_offset = LinkageTraits::CStackBackingStoreLength();
- for (int i = 0; i < parameter_count; i++) {
- if (i < LinkageTraits::CRegisterParametersLength()) {
- locations.AddParam(regloc(LinkageTraits::CRegisterParameter(i)));
- } else {
- locations.AddParam(stackloc(-1 - stack_offset));
- stack_offset++;
- }
- }
-
- // The target for C calls is always an address (i.e. machine pointer).
- MachineType target_type = kMachPtr;
- LinkageLocation target_loc = LinkageLocation::AnyRegister();
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallAddress, // kind
- target_type, // target MachineType
- target_loc, // target location
- msig, // machine_sig
- locations.Build(), // location_sig
- 0, // js_parameter_count
- Operator::kNoProperties, // properties
- LinkageTraits::CCalleeSaveRegisters(), // callee-saved registers
- LinkageTraits::CCalleeSaveFPRegisters(), // callee-saved fp regs
- CallDescriptor::kNoFlags, // flags
- "c-call");
- }
-
- static LinkageLocation regloc(Register reg) {
- return LinkageLocation(Register::ToAllocationIndex(reg));
- }
-
- static LinkageLocation stackloc(int i) {
- DCHECK_LT(i, 0);
- return LinkageLocation(i);
- }
-
- static MachineType reptyp(Representation representation) {
- switch (representation.kind()) {
- case Representation::kInteger8:
- return kMachInt8;
- case Representation::kUInteger8:
- return kMachUint8;
- case Representation::kInteger16:
- return kMachInt16;
- case Representation::kUInteger16:
- return kMachUint16;
- case Representation::kInteger32:
- return kMachInt32;
- case Representation::kSmi:
- case Representation::kTagged:
- case Representation::kHeapObject:
- return kMachAnyTagged;
- case Representation::kDouble:
- return kMachFloat64;
- case Representation::kExternal:
- return kMachPtr;
- case Representation::kNone:
- case Representation::kNumRepresentations:
- break;
- }
- UNREACHABLE();
- return kMachNone;
- }
-};
-
-
-LinkageLocation Linkage::GetOsrValueLocation(int index) const {
- CHECK(incoming_->IsJSFunctionCall());
- int parameter_count = static_cast<int>(incoming_->JSParameterCount() - 1);
- int first_stack_slot = OsrHelper::FirstStackSlotIndex(parameter_count);
-
- if (index == kOsrContextSpillSlotIndex) {
- // Context. Use the parameter location of the context spill slot.
- // Parameter (arity + 1) is special for the context of the function frame.
- int context_index = 1 + 1 + parameter_count; // target + receiver + params
- return incoming_->GetInputLocation(context_index);
- } else if (index >= first_stack_slot) {
- // Local variable stored in this (callee) stack.
- int spill_index =
- LinkageLocation::ANY_REGISTER + 1 + index - first_stack_slot;
- // TODO(titzer): bailout instead of crashing here.
- CHECK(spill_index <= LinkageLocation::MAX_STACK_SLOT);
- return LinkageLocation(spill_index);
- } else {
- // Parameter. Use the assigned location from the incoming call descriptor.
- int parameter_index = 1 + index; // skip index 0, which is the target.
- return incoming_->GetInputLocation(parameter_index);
- }
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_LINKAGE_IMPL_H_
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 6ef014246d..2bbedaceb2 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -5,8 +5,10 @@
#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/frame.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node.h"
+#include "src/compiler/osr.h"
#include "src/compiler/pipeline.h"
#include "src/scopes.h"
@@ -14,6 +16,41 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+LinkageLocation regloc(Register reg) {
+ return LinkageLocation::ForRegister(Register::ToAllocationIndex(reg));
+}
+
+
+MachineType reptyp(Representation representation) {
+ switch (representation.kind()) {
+ case Representation::kInteger8:
+ return kMachInt8;
+ case Representation::kUInteger8:
+ return kMachUint8;
+ case Representation::kInteger16:
+ return kMachInt16;
+ case Representation::kUInteger16:
+ return kMachUint16;
+ case Representation::kInteger32:
+ return kMachInt32;
+ case Representation::kSmi:
+ case Representation::kTagged:
+ case Representation::kHeapObject:
+ return kMachAnyTagged;
+ case Representation::kDouble:
+ return kMachFloat64;
+ case Representation::kExternal:
+ return kMachPtr;
+ case Representation::kNone:
+ case Representation::kNumRepresentations:
+ break;
+ }
+ UNREACHABLE();
+ return kMachNone;
+}
+} // namespace
+
std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
switch (k) {
@@ -34,7 +71,7 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
std::ostream& operator<<(std::ostream& os, const CallDescriptor& d) {
// TODO(svenpanne) Output properties etc. and be less cryptic.
return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount()
- << "j" << d.JSParameterCount() << "i" << d.InputCount() << "f"
+ << "s" << d.StackParameterCount() << "i" << d.InputCount() << "f"
<< d.FrameStateCount() << "t" << d.SupportsTailCalls();
}
@@ -50,6 +87,25 @@ bool CallDescriptor::HasSameReturnLocationsAs(
bool CallDescriptor::CanTailCall(const Node* node) const {
+ // Determine the number of stack parameters passed in
+ size_t stack_params = 0;
+ for (size_t i = 0; i < InputCount(); ++i) {
+ if (!GetInputLocation(i).IsRegister()) {
+ ++stack_params;
+ }
+ }
+ // Ensure the input linkage contains the stack parameters in the right order
+ size_t current_stack_param = 0;
+ for (size_t i = 0; i < InputCount(); ++i) {
+ if (!GetInputLocation(i).IsRegister()) {
+ if (GetInputLocation(i) != LinkageLocation::ForCallerFrameSlot(
+ static_cast<int>(current_stack_param) -
+ static_cast<int>(stack_params))) {
+ return false;
+ }
+ ++current_stack_param;
+ }
+ }
// Tail calling is currently allowed if return locations match and all
// parameters are either in registers or on the stack but match exactly in
// number and content.
@@ -57,11 +113,10 @@ bool CallDescriptor::CanTailCall(const Node* node) const {
if (!HasSameReturnLocationsAs(other)) return false;
size_t current_input = 0;
size_t other_input = 0;
- size_t stack_parameter = 0;
while (true) {
if (other_input >= other->InputCount()) {
- while (current_input <= InputCount()) {
- if (!GetInputLocation(current_input).is_register()) {
+ while (current_input < InputCount()) {
+ if (!GetInputLocation(current_input).IsRegister()) {
return false;
}
++current_input;
@@ -70,18 +125,18 @@ bool CallDescriptor::CanTailCall(const Node* node) const {
}
if (current_input >= InputCount()) {
while (other_input < other->InputCount()) {
- if (!other->GetInputLocation(other_input).is_register()) {
+ if (!other->GetInputLocation(other_input).IsRegister()) {
return false;
}
++other_input;
}
return true;
}
- if (GetInputLocation(current_input).is_register()) {
+ if (GetInputLocation(current_input).IsRegister()) {
++current_input;
continue;
}
- if (other->GetInputLocation(other_input).is_register()) {
+ if (other->GetInputLocation(other_input).IsRegister()) {
++other_input;
continue;
}
@@ -93,11 +148,12 @@ bool CallDescriptor::CanTailCall(const Node* node) const {
if (input->opcode() != IrOpcode::kParameter) {
return false;
}
+ // Make sure that the parameter input passed through to the tail call
+ // corresponds to the correct stack slot.
size_t param_index = ParameterIndexOf(input->op());
- if (param_index != stack_parameter) {
+ if (param_index != current_input - 1) {
return false;
}
- ++stack_parameter;
++current_input;
++other_input;
}
@@ -115,11 +171,11 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
info->isolate(), zone, descriptor, stub->GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties);
}
- if (info->function() != NULL) {
+ if (info->has_literal()) {
// If we already have the function literal, use the number of parameters
// plus the receiver.
return GetJSCallDescriptor(zone, info->is_osr(),
- 1 + info->function()->parameter_count(),
+ 1 + info->literal()->parameter_count(),
CallDescriptor::kNoFlags);
}
if (!info->closure().is_null()) {
@@ -134,30 +190,22 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
}
-FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame,
- int extra) const {
- if (frame->GetSpillSlotCount() > 0 || incoming_->IsJSFunctionCall() ||
- incoming_->kind() == CallDescriptor::kCallAddress) {
- int offset;
- int register_save_area_size = frame->GetRegisterSaveAreaSize();
- if (spill_slot >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- offset =
- -(spill_slot + 1) * kPointerSize - register_save_area_size + extra;
- } else {
- // Incoming parameter. Skip the return address.
- offset = -(spill_slot + 1) * kPointerSize + kFPOnStackSize +
- kPCOnStackSize + extra;
- }
+FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame) const {
+ bool has_frame = frame->GetSpillSlotCount() > 0 ||
+ incoming_->IsJSFunctionCall() ||
+ incoming_->kind() == CallDescriptor::kCallAddress;
+ const int offset =
+ (StandardFrameConstants::kFixedSlotCountAboveFp - spill_slot - 1) *
+ kPointerSize;
+ if (has_frame) {
return FrameOffset::FromFramePointer(offset);
} else {
// No frame. Retrieve all parameters relative to stack pointer.
DCHECK(spill_slot < 0); // Must be a parameter.
- int register_save_area_size = frame->GetRegisterSaveAreaSize();
- int offset = register_save_area_size - (spill_slot + 1) * kPointerSize +
- kPCOnStackSize + extra;
- return FrameOffset::FromStackPointer(offset);
+ int offsetSpToFp =
+ kPointerSize * (StandardFrameConstants::kFixedSlotCountAboveFp -
+ frame->GetTotalFrameSlotCount());
+ return FrameOffset::FromStackPointer(offset - offsetSpToFp);
}
}
@@ -170,6 +218,7 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
switch (function) {
case Runtime::kAllocateInTargetSpace:
case Runtime::kDateField:
+ case Runtime::kFinalizeClassDefinition: // TODO(conradw): Is it safe?
case Runtime::kDefineClassMethod: // TODO(jarin): Is it safe?
case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
@@ -183,7 +232,7 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
case Runtime::kPushBlockContext:
case Runtime::kPushCatchContext:
case Runtime::kReThrow:
- case Runtime::kStringCompareRT:
+ case Runtime::kStringCompare:
case Runtime::kStringEquals:
case Runtime::kToFastProperties: // TODO(jarin): Is it safe?
case Runtime::kTraceEnter:
@@ -191,9 +240,11 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
return 0;
case Runtime::kInlineArguments:
case Runtime::kInlineCallFunction:
+ case Runtime::kInlineDefaultConstructorCallSuper:
case Runtime::kInlineGetCallerJSFunction:
case Runtime::kInlineGetPrototype:
case Runtime::kInlineRegExpExec:
+ case Runtime::kInlineToObject:
return 1;
case Runtime::kInlineDeoptimizeNow:
case Runtime::kInlineThrowNotDateError:
@@ -213,56 +264,251 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
bool CallDescriptor::UsesOnlyRegisters() const {
for (size_t i = 0; i < InputCount(); ++i) {
- if (!GetInputLocation(i).is_register()) return false;
+ if (!GetInputLocation(i).IsRegister()) return false;
}
for (size_t i = 0; i < ReturnCount(); ++i) {
- if (!GetReturnLocation(i).is_register()) return false;
+ if (!GetReturnLocation(i).IsRegister()) return false;
}
return true;
}
-//==============================================================================
-// Provide unimplemented methods on unsupported architectures, to at least link.
-//==============================================================================
-#if !V8_TURBOFAN_BACKEND
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Zone* zone, Runtime::FunctionId function_id, int js_parameter_count,
+ Operator::Properties properties) {
+ const size_t function_count = 1;
+ const size_t num_args_count = 1;
+ const size_t context_count = 1;
+ const size_t parameter_count = function_count +
+ static_cast<size_t>(js_parameter_count) +
+ num_args_count + context_count;
+
+ const Runtime::Function* function = Runtime::FunctionForId(function_id);
+ const size_t return_count = static_cast<size_t>(function->result_size);
+
+ LocationSignature::Builder locations(zone, return_count, parameter_count);
+ MachineSignature::Builder types(zone, return_count, parameter_count);
+
+ // Add returns.
+ if (locations.return_count_ > 0) {
+ locations.AddReturn(regloc(kReturnRegister0));
+ }
+ if (locations.return_count_ > 1) {
+ locations.AddReturn(regloc(kReturnRegister1));
+ }
+ for (size_t i = 0; i < return_count; i++) {
+ types.AddReturn(kMachAnyTagged);
+ }
+
+ // All parameters to the runtime call go on the stack.
+ for (int i = 0; i < js_parameter_count; i++) {
+ locations.AddParam(
+ LinkageLocation::ForCallerFrameSlot(i - js_parameter_count));
+ types.AddParam(kMachAnyTagged);
+ }
+ // Add runtime function itself.
+ locations.AddParam(regloc(kRuntimeCallFunctionRegister));
+ types.AddParam(kMachAnyTagged);
+
+ // Add runtime call argument count.
+ locations.AddParam(regloc(kRuntimeCallArgCountRegister));
+ types.AddParam(kMachPtr);
+
+ // Add context.
+ locations.AddParam(regloc(kContextRegister));
+ types.AddParam(kMachAnyTagged);
+
+ CallDescriptor::Flags flags = Linkage::FrameStateInputCount(function_id) > 0
+ ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags;
+
+ // The target for runtime calls is a code object.
+ MachineType target_type = kMachAnyTagged;
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallCodeObject, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ js_parameter_count, // stack_parameter_count
+ properties, // properties
+ kNoCalleeSaved, // callee-saved
+ kNoCalleeSaved, // callee-saved fp
+ flags, // flags
+ function->name); // debug name
+}
+
+
CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
- int parameter_count,
+ int js_parameter_count,
CallDescriptor::Flags flags) {
- UNIMPLEMENTED();
- return NULL;
-}
+ const size_t return_count = 1;
+ const size_t context_count = 1;
+ const size_t parameter_count = js_parameter_count + context_count;
+ LocationSignature::Builder locations(zone, return_count, parameter_count);
+ MachineSignature::Builder types(zone, return_count, parameter_count);
-LinkageLocation Linkage::GetOsrValueLocation(int index) const {
- UNIMPLEMENTED();
- return LinkageLocation(-1); // Dummy value
+ // All JS calls have exactly one return value.
+ locations.AddReturn(regloc(kReturnRegister0));
+ types.AddReturn(kMachAnyTagged);
+
+ // All parameters to JS calls go on the stack.
+ for (int i = 0; i < js_parameter_count; i++) {
+ int spill_slot_index = i - js_parameter_count;
+ locations.AddParam(LinkageLocation::ForCallerFrameSlot(spill_slot_index));
+ types.AddParam(kMachAnyTagged);
+ }
+ // Add context.
+ locations.AddParam(regloc(kContextRegister));
+ types.AddParam(kMachAnyTagged);
+
+ // The target for JS function calls is the JSFunction object.
+ MachineType target_type = kMachAnyTagged;
+ // TODO(titzer): When entering into an OSR function from unoptimized code,
+ // the JSFunction is not in a register, but it is on the stack in an
+ // unaddressable spill slot. We hack this in the OSR prologue. Fix.
+ LinkageLocation target_loc = regloc(kJSFunctionRegister);
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallJSFunction, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ js_parameter_count, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kCanUseRoots | // flags
+ flags, // flags
+ "js-call");
}
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Zone* zone, Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) {
- UNIMPLEMENTED();
- return NULL;
+CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
+ MachineSignature::Builder types(zone, 0, 5);
+ LocationSignature::Builder locations(zone, 0, 5);
+
+ // Add registers for fixed parameters passed via interpreter dispatch.
+ STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
+ types.AddParam(kMachAnyTagged);
+ locations.AddParam(regloc(kInterpreterAccumulatorRegister));
+
+ STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
+ types.AddParam(kMachPtr);
+ locations.AddParam(regloc(kInterpreterRegisterFileRegister));
+
+ STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
+ types.AddParam(kMachIntPtr);
+ locations.AddParam(regloc(kInterpreterBytecodeOffsetRegister));
+
+ STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
+ types.AddParam(kMachAnyTagged);
+ locations.AddParam(regloc(kInterpreterBytecodeArrayRegister));
+
+ STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
+ types.AddParam(kMachPtr);
+ locations.AddParam(regloc(kInterpreterDispatchTableRegister));
+
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallCodeObject, // kind
+ kMachNone, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ 0, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp regs
+ CallDescriptor::kSupportsTailCalls | // flags
+ CallDescriptor::kCanUseRoots, // flags
+ "interpreter-dispatch");
}
+// TODO(all): Add support for return representations/locations to
+// CallInterfaceDescriptor.
+// TODO(turbofan): cache call descriptors for code stub calls.
CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
Operator::Properties properties, MachineType return_type) {
- UNIMPLEMENTED();
- return NULL;
+ const int register_parameter_count = descriptor.GetRegisterParameterCount();
+ const int js_parameter_count =
+ register_parameter_count + stack_parameter_count;
+ const int context_count = 1;
+ const size_t return_count = 1;
+ const size_t parameter_count =
+ static_cast<size_t>(js_parameter_count + context_count);
+
+ LocationSignature::Builder locations(zone, return_count, parameter_count);
+ MachineSignature::Builder types(zone, return_count, parameter_count);
+
+ // Add return location.
+ locations.AddReturn(regloc(kReturnRegister0));
+ types.AddReturn(return_type);
+
+ // Add parameters in registers and on the stack.
+ for (int i = 0; i < js_parameter_count; i++) {
+ if (i < register_parameter_count) {
+ // The first parameters go in registers.
+ Register reg = descriptor.GetRegisterParameter(i);
+ Representation rep =
+ RepresentationFromType(descriptor.GetParameterType(i));
+ locations.AddParam(regloc(reg));
+ types.AddParam(reptyp(rep));
+ } else {
+ // The rest of the parameters go on the stack.
+ int stack_slot = i - register_parameter_count - stack_parameter_count;
+ locations.AddParam(LinkageLocation::ForCallerFrameSlot(stack_slot));
+ types.AddParam(kMachAnyTagged);
+ }
+ }
+ // Add context.
+ locations.AddParam(regloc(kContextRegister));
+ types.AddParam(kMachAnyTagged);
+
+ // The target for stub calls is a code object.
+ MachineType target_type = kMachAnyTagged;
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallCodeObject, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ stack_parameter_count, // stack_parameter_count
+ properties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp
+ flags, // flags
+ descriptor.DebugName(isolate));
}
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- const MachineSignature* sig) {
- UNIMPLEMENTED();
- return NULL;
+LinkageLocation Linkage::GetOsrValueLocation(int index) const {
+ CHECK(incoming_->IsJSFunctionCall());
+ int parameter_count = static_cast<int>(incoming_->JSParameterCount() - 1);
+ int first_stack_slot = OsrHelper::FirstStackSlotIndex(parameter_count);
+
+ if (index == kOsrContextSpillSlotIndex) {
+ // Context. Use the parameter location of the context spill slot.
+ // Parameter (arity + 1) is special for the context of the function frame.
+ int context_index = 1 + 1 + parameter_count; // target + receiver + params
+ return incoming_->GetInputLocation(context_index);
+ } else if (index >= first_stack_slot) {
+ // Local variable stored in this (callee) stack.
+ int spill_index =
+ index - first_stack_slot + StandardFrameConstants::kFixedSlotCount;
+ return LinkageLocation::ForCalleeFrameSlot(spill_index);
+ } else {
+ // Parameter. Use the assigned location from the incoming call descriptor.
+ int parameter_index = 1 + index; // skip index 0, which is the target.
+ return incoming_->GetInputLocation(parameter_index);
+ }
}
-#endif // !V8_TURBOFAN_BACKEND
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 31b9faca2a..f5507a0594 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -17,42 +17,89 @@ namespace v8 {
namespace internal {
class CallInterfaceDescriptor;
+class CompilationInfo;
namespace compiler {
+const RegList kNoCalleeSaved = 0;
+
class Node;
class OsrHelper;
// Describes the location for a parameter or a return value to a call.
class LinkageLocation {
public:
- explicit LinkageLocation(int location) : location_(location) {}
+ bool operator==(const LinkageLocation& other) const {
+ return bit_field_ == other.bit_field_;
+ }
- bool is_register() const {
- return 0 <= location_ && location_ <= ANY_REGISTER;
+ bool operator!=(const LinkageLocation& other) const {
+ return !(*this == other);
}
- static const int16_t ANY_REGISTER = 1023;
- static const int16_t MAX_STACK_SLOT = 32767;
+ static LinkageLocation ForAnyRegister() {
+ return LinkageLocation(REGISTER, ANY_REGISTER);
+ }
- static LinkageLocation AnyRegister() { return LinkageLocation(ANY_REGISTER); }
+ static LinkageLocation ForRegister(int32_t reg) {
+ DCHECK(reg >= 0);
+ return LinkageLocation(REGISTER, reg);
+ }
- bool operator==(const LinkageLocation& other) const {
- return location_ == other.location_;
+ static LinkageLocation ForCallerFrameSlot(int32_t slot) {
+ DCHECK(slot < 0);
+ return LinkageLocation(STACK_SLOT, slot);
}
- bool operator!=(const LinkageLocation& other) const {
- return !(*this == other);
+ static LinkageLocation ForCalleeFrameSlot(int32_t slot) {
+ // TODO(titzer): bailout instead of crashing here.
+ DCHECK(slot >= 0 && slot < LinkageLocation::MAX_STACK_SLOT);
+ return LinkageLocation(STACK_SLOT, slot);
}
private:
friend class CallDescriptor;
friend class OperandGenerator;
- // location < 0 -> a stack slot on the caller frame
- // 0 <= location < 1023 -> a specific machine register
- // 1023 <= location < 1024 -> any machine register
- // 1024 <= location -> a stack slot in the callee frame
- int16_t location_;
+
+ enum LocationType { REGISTER, STACK_SLOT };
+
+ class TypeField : public BitField<LocationType, 0, 1> {};
+ class LocationField : public BitField<int32_t, TypeField::kNext, 31> {};
+
+ static const int32_t ANY_REGISTER = -1;
+ static const int32_t MAX_STACK_SLOT = 32767;
+
+ LinkageLocation(LocationType type, int32_t location) {
+ bit_field_ = TypeField::encode(type) |
+ ((location << LocationField::kShift) & LocationField::kMask);
+ }
+
+ int32_t GetLocation() const {
+ return static_cast<int32_t>(bit_field_ & LocationField::kMask) >>
+ LocationField::kShift;
+ }
+
+ bool IsRegister() const { return TypeField::decode(bit_field_) == REGISTER; }
+ bool IsAnyRegister() const {
+ return IsRegister() && GetLocation() == ANY_REGISTER;
+ }
+ bool IsCallerFrameSlot() const { return !IsRegister() && GetLocation() < 0; }
+ bool IsCalleeFrameSlot() const { return !IsRegister() && GetLocation() >= 0; }
+
+ int32_t AsRegister() const {
+ DCHECK(IsRegister());
+ return GetLocation();
+ }
+ int32_t AsCallerFrameSlot() const {
+ DCHECK(IsCallerFrameSlot());
+ return GetLocation();
+ }
+ int32_t AsCalleeFrameSlot() const {
+ DCHECK(IsCalleeFrameSlot());
+ return GetLocation();
+ }
+
+ int32_t bit_field_;
};
typedef Signature<LinkageLocation> LocationSignature;
@@ -63,9 +110,9 @@ class CallDescriptor final : public ZoneObject {
public:
// Describes the kind of this call, which determines the target.
enum Kind {
- kCallCodeObject, // target is a Code object
- kCallJSFunction, // target is a JSFunction object
- kCallAddress // target is a machine pointer
+ kCallCodeObject, // target is a Code object
+ kCallJSFunction, // target is a JSFunction object
+ kCallAddress, // target is a machine pointer
};
enum Flag {
@@ -76,13 +123,14 @@ class CallDescriptor final : public ZoneObject {
kHasExceptionHandler = 1u << 3,
kHasLocalCatchHandler = 1u << 4,
kSupportsTailCalls = 1u << 5,
+ kCanUseRoots = 1u << 6,
kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
};
typedef base::Flags<Flag> Flags;
CallDescriptor(Kind kind, MachineType target_type, LinkageLocation target_loc,
const MachineSignature* machine_sig,
- LocationSignature* location_sig, size_t js_param_count,
+ LocationSignature* location_sig, size_t stack_param_count,
Operator::Properties properties,
RegList callee_saved_registers,
RegList callee_saved_fp_registers, Flags flags,
@@ -92,7 +140,7 @@ class CallDescriptor final : public ZoneObject {
target_loc_(target_loc),
machine_sig_(machine_sig),
location_sig_(location_sig),
- js_param_count_(js_param_count),
+ stack_param_count_(stack_param_count),
properties_(properties),
callee_saved_registers_(callee_saved_registers),
callee_saved_fp_registers_(callee_saved_fp_registers),
@@ -117,9 +165,14 @@ class CallDescriptor final : public ZoneObject {
// The number of C parameters to this call.
size_t CParameterCount() const { return machine_sig_->parameter_count(); }
- // The number of JavaScript parameters to this call, including the receiver
- // object.
- size_t JSParameterCount() const { return js_param_count_; }
+ // The number of stack parameters to the call.
+ size_t StackParameterCount() const { return stack_param_count_; }
+
+ // The number of parameters to the JS function call.
+ size_t JSParameterCount() const {
+ DCHECK(IsJSFunctionCall());
+ return stack_param_count_;
+ }
// The total number of inputs to this call, which includes the target,
// receiver, context, etc.
@@ -178,7 +231,7 @@ class CallDescriptor final : public ZoneObject {
const LinkageLocation target_loc_;
const MachineSignature* const machine_sig_;
const LocationSignature* const location_sig_;
- const size_t js_param_count_;
+ const size_t stack_param_count_;
const Operator::Properties properties_;
const RegList callee_saved_registers_;
const RegList callee_saved_fp_registers_;
@@ -235,6 +288,11 @@ class Linkage : public ZoneObject {
static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
const MachineSignature* sig);
+ // Creates a call descriptor for interpreter handler code stubs. These are not
+ // intended to be called directly but are instead dispatched to by the
+ // interpreter.
+ static CallDescriptor* GetInterpreterDispatchDescriptor(Zone* zone);
+
// Get the location of an (incoming) parameter to this function.
LinkageLocation GetParameterLocation(int index) const {
return incoming_->GetInputLocation(index + 1); // + 1 to skip target.
@@ -256,9 +314,8 @@ class Linkage : public ZoneObject {
// Get the frame offset for a given spill slot. The location depends on the
// calling convention and the specific frame layout, and may thus be
// architecture-specific. Negative spill slots indicate arguments on the
- // caller's frame. The {extra} parameter indicates an additional offset from
- // the frame offset, e.g. to index into part of a double slot.
- FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0) const;
+ // caller's frame.
+ FrameOffset GetFrameOffset(int spill_slot, Frame* frame) const;
static int FrameStateInputCount(Runtime::FunctionId function);
@@ -271,6 +328,14 @@ class Linkage : public ZoneObject {
// A special {OsrValue} index to indicate the context spill slot.
static const int kOsrContextSpillSlotIndex = -1;
+ // Special parameter indices used to pass fixed register data through
+ // interpreter dispatches.
+ static const int kInterpreterAccumulatorParameter = 0;
+ static const int kInterpreterRegisterFileParameter = 1;
+ static const int kInterpreterBytecodeOffsetParameter = 2;
+ static const int kInterpreterBytecodeArrayParameter = 3;
+ static const int kInterpreterDispatchTableParameter = 4;
+
private:
CallDescriptor* const incoming_;
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 3ce59733eb..86e677d8ee 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -439,6 +439,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReduceFloat64InsertHighWord32(node);
case IrOpcode::kStore:
return ReduceStore(node);
+ case IrOpcode::kFloat64Equal:
+ case IrOpcode::kFloat64LessThan:
+ case IrOpcode::kFloat64LessThanOrEqual:
+ return ReduceFloat64Compare(node);
default:
break;
}
@@ -1004,6 +1008,37 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
}
+Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
+ DCHECK((IrOpcode::kFloat64Equal == node->opcode()) ||
+ (IrOpcode::kFloat64LessThan == node->opcode()) ||
+ (IrOpcode::kFloat64LessThanOrEqual == node->opcode()));
+ // As all Float32 values have an exact representation in Float64, comparing
+ // two Float64 values both converted from Float32 is equivalent to comparing
+ // the original Float32s, so we can ignore the conversions.
+ Float64BinopMatcher m(node);
+ if (m.left().IsChangeFloat32ToFloat64() &&
+ m.right().IsChangeFloat32ToFloat64()) {
+ switch (node->opcode()) {
+ case IrOpcode::kFloat64Equal:
+ node->set_op(machine()->Float32Equal());
+ break;
+ case IrOpcode::kFloat64LessThan:
+ node->set_op(machine()->Float32LessThan());
+ break;
+ case IrOpcode::kFloat64LessThanOrEqual:
+ node->set_op(machine()->Float32LessThanOrEqual());
+ break;
+ default:
+ return NoChange();
+ }
+ node->ReplaceInput(0, m.left().InputAt(0));
+ node->ReplaceInput(1, m.right().InputAt(0));
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
CommonOperatorBuilder* MachineOperatorReducer::common() const {
return jsgraph()->common();
}
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index b0976b78d2..7f8ff1a5fd 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -80,6 +80,7 @@ class MachineOperatorReducer final : public Reducer {
Reduction ReduceWord32Or(Node* node);
Reduction ReduceFloat64InsertLowWord32(Node* node);
Reduction ReduceFloat64InsertHighWord32(Node* node);
+ Reduction ReduceFloat64Compare(Node* node);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/machine-type.h b/deps/v8/src/compiler/machine-type.h
index f152611a14..0cd2a84010 100644
--- a/deps/v8/src/compiler/machine-type.h
+++ b/deps/v8/src/compiler/machine-type.h
@@ -116,6 +116,11 @@ inline int ElementSizeOf(MachineType machine_type) {
return 1 << shift;
}
+inline bool IsFloatingPoint(MachineType type) {
+ MachineType rep = RepresentationOf(type);
+ return rep == kRepFloat32 || rep == kRepFloat64;
+}
+
typedef Signature<MachineType> MachineSignature;
} // namespace compiler
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 5a69658cbf..560ef26692 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -6,6 +6,7 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
#include "src/mips/macro-assembler-mips.h"
#include "src/scopes.h"
@@ -106,12 +107,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK(op != NULL);
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(
- AllocatedOperand::cast(op)->index(), frame(), 0);
+ FrameOffset offset =
+ linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -263,8 +261,8 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
return kNoCondition;
}
-FPUCondition FlagsConditionToConditionCmpD(bool& predicate,
- FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
+ FlagsCondition condition) {
switch (condition) {
case kEqual:
predicate = true;
@@ -793,14 +791,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kMipsPush:
- __ Push(i.InputRegister(0));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Subu(sp, sp, Operand(kDoubleSize));
+ } else {
+ __ Push(i.InputRegister(0));
+ }
break;
case kMipsStackClaim: {
__ Subu(sp, sp, Operand(i.InputInt32(0)));
break;
}
case kMipsStoreToStackSlot: {
- __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
break;
}
case kMipsStoreWriteBarrier: {
@@ -982,22 +989,33 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ Branch(USE_DELAY_SLOT, &done, cc, left, right);
__ li(result, Operand(1)); // In delay slot.
- } else if (instr->arch_opcode() == kMipsCmpD) {
+ } else if (instr->arch_opcode() == kMipsCmpD ||
+ instr->arch_opcode() == kMipsCmpS) {
FPURegister left = i.InputDoubleRegister(0);
FPURegister right = i.InputDoubleRegister(1);
bool predicate;
- FPUCondition cc = FlagsConditionToConditionCmpD(predicate, condition);
+ FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
if (!IsMipsArchVariant(kMips32r6)) {
__ li(result, Operand(1));
- __ c(cc, D, left, right);
+ if (instr->arch_opcode() == kMipsCmpD) {
+ __ c(cc, D, left, right);
+ } else {
+ DCHECK(instr->arch_opcode() == kMipsCmpS);
+ __ c(cc, S, left, right);
+ }
if (predicate) {
__ Movf(result, zero_reg);
} else {
__ Movt(result, zero_reg);
}
} else {
- __ cmp(cc, L, kDoubleCompareReg, left, right);
+ if (instr->arch_opcode() == kMipsCmpD) {
+ __ cmp(cc, L, kDoubleCompareReg, left, right);
+ } else {
+ DCHECK(instr->arch_opcode() == kMipsCmpS);
+ __ cmp(cc, W, kDoubleCompareReg, left, right);
+ }
__ mfc1(at, kDoubleCompareReg);
__ srl(result, at, 31); // Cmp returns all 1s for true.
if (!predicate) // Toggle result for not equal.
@@ -1060,35 +1078,17 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ Push(ra, fp);
__ mov(fp, sp);
-
- const RegList saves = descriptor->CalleeSavedRegisters();
- // Save callee-saved registers.
- __ MultiPush(saves);
- // kNumCalleeSaved includes the fp register, but the fp register
- // is saved separately in TF.
- DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
- int register_save_area_size = kNumCalleeSaved * kPointerSize;
-
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
- // Save callee-saved FPU registers.
- __ MultiPushFPU(saves_fpu);
- DCHECK(kNumCalleeSavedFPU == base::bits::CountPopulation32(saves_fpu));
- register_save_area_size += kNumCalleeSavedFPU * kDoubleSize * kPointerSize;
-
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
} else if (descriptor->IsJSFunctionCall()) {
CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
} else if (needs_frame_) {
__ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(0);
}
if (info()->is_osr()) {
@@ -1103,55 +1103,71 @@ void CodeGenerator::AssemblePrologue() {
osr_pc_offset_ = __ pc_offset();
// TODO(titzer): cannot address target function == local #-1
__ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
- stack_slots -= frame()->GetOsrStackSlotCount();
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
+
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+ }
+ if (stack_shrink_slots > 0) {
+ __ Subu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
}
- if (stack_slots > 0) {
- __ Subu(sp, sp, Operand(stack_slots * kPointerSize));
+ // Save callee-saved FPU registers.
+ if (saves_fpu != 0) {
+ __ MultiPushFPU(saves_fpu);
+ int count = base::bits::CountPopulation32(saves_fpu);
+ DCHECK(kNumCalleeSavedFPU == count);
+ frame()->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kPointerSize));
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // Save callee-saved registers.
+ __ MultiPush(saves);
+ // kNumCalleeSaved includes the fp register, but the fp register
+ // is saved separately in TF.
+ int count = base::bits::CountPopulation32(saves);
+ DCHECK(kNumCalleeSaved == count + 1);
+ frame()->AllocateSavedCalleeRegisterSlots(count);
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- if (stack_slots > 0) {
- __ Addu(sp, sp, Operand(stack_slots * kPointerSize));
- }
- // Restore FPU registers.
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
- __ MultiPopFPU(saves_fpu);
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
- // Restore GP registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
- __ MultiPop(saves);
- }
+ // Restore GP registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore FPU registers.
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ __ MultiPopFPU(saves_fpu);
+ }
+
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ mov(sp, fp);
__ Pop(ra, fp);
- __ Ret();
} else if (descriptor->IsJSFunctionCall() || needs_frame_) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ Branch(&return_label_);
+ return;
} else {
__ bind(&return_label_);
__ mov(sp, fp);
__ Pop(ra, fp);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : (info()->IsStub()
- ? info()->code_stub()->GetStackParameterCount()
- : 0);
- if (pop_count != 0) {
- __ DropAndRet(pop_count);
- } else {
- __ Ret();
- }
}
+ }
+ if (pop_count != 0) {
+ __ DropAndRet(pop_count);
} else {
__ Ret();
}
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index c2420ac0d8..f95c82627b 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -533,23 +533,23 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
int slot = kCArgSlotCount;
- for (Node* node : buffer.pushed_nodes) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ for (Node* input : buffer.pushed_nodes) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
} else {
// Possibly align stack here for functions.
- int push_count = buffer.pushed_nodes.size();
+ int push_count = static_cast<int>(descriptor->StackParameterCount());
if (push_count > 0) {
Emit(kMipsStackClaim, g.NoOutput(),
g.TempImmediate(push_count << kPointerSizeLog2));
}
- int slot = buffer.pushed_nodes.size() - 1;
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(node),
- g.TempImmediate(slot << kPointerSizeLog2));
- slot--;
+ for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
+ if (Node* input = buffer.pushed_nodes[n]) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ g.TempImmediate(n << kPointerSizeLog2));
+ }
}
}
@@ -636,14 +636,14 @@ void InstructionSelector::VisitTailCall(Node* node) {
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, false);
// Possibly align stack here for functions.
- int push_count = buffer.pushed_nodes.size();
+ int push_count = static_cast<int>(descriptor->StackParameterCount());
if (push_count > 0) {
Emit(kMipsStackClaim, g.NoOutput(),
g.TempImmediate(push_count << kPointerSizeLog2));
}
- int slot = buffer.pushed_nodes.size() - 1;
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ int slot = static_cast<int>(buffer.pushed_nodes.size()) - 1;
+ for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(slot << kPointerSizeLog2));
slot--;
}
diff --git a/deps/v8/src/compiler/mips/linkage-mips.cc b/deps/v8/src/compiler/mips/linkage-mips.cc
deleted file mode 100644
index 7b03340a0a..0000000000
--- a/deps/v8/src/compiler/mips/linkage-mips.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct MipsLinkageHelperTraits {
- static Register ReturnValueReg() { return v0; }
- static Register ReturnValue2Reg() { return v1; }
- static Register JSCallFunctionReg() { return a1; }
- static Register ContextReg() { return cp; }
- static Register RuntimeCallFunctionReg() { return a1; }
- static Register RuntimeCallArgCountReg() { return a0; }
- static RegList CCalleeSaveRegisters() {
- return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
- s6.bit() | s7.bit();
- }
- static RegList CCalleeSaveFPRegisters() {
- return f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() |
- f30.bit();
- }
- static Register CRegisterParameter(int i) {
- static Register register_parameters[] = {a0, a1, a2, a3};
- return register_parameters[i];
- }
- static int CRegisterParametersLength() { return 4; }
- static int CStackBackingStoreLength() { return 0; }
-};
-
-
-typedef LinkageHelper<MipsLinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
- int parameter_count,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Zone* zone, Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties, MachineType return_type) {
- return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties,
- return_type);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- const MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 72114215a0..c72d9789b8 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -6,6 +6,7 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
#include "src/mips/macro-assembler-mips.h"
#include "src/scopes.h"
@@ -106,12 +107,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK(op != NULL);
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(
- AllocatedOperand::cast(op)->index(), frame(), 0);
+ FrameOffset offset =
+ linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -264,8 +262,8 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
}
-FPUCondition FlagsConditionToConditionCmpD(bool& predicate,
- FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
+ FlagsCondition condition) {
switch (condition) {
case kEqual:
predicate = true;
@@ -440,7 +438,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
-
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
RecordCallPosition(instr);
@@ -453,7 +450,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
-
AssembleDeconstructActivationRecord();
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(at);
@@ -553,8 +549,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64DmodU:
__ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- break;
case kMips64And:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -775,14 +769,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
break;
}
- case kMips64CvtSD: {
+ case kMips64CvtSD:
__ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
break;
- }
- case kMips64CvtDS: {
+ case kMips64CvtDS:
__ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
break;
- }
case kMips64CvtDW: {
FPURegister scratch = kScratchDoubleReg;
__ mtc1(i.InputRegister(0), scratch);
@@ -868,14 +860,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kMips64Push:
- __ Push(i.InputRegister(0));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Subu(sp, sp, Operand(kDoubleSize));
+ } else {
+ __ Push(i.InputRegister(0));
+ }
break;
case kMips64StackClaim: {
__ Dsubu(sp, sp, Operand(i.InputInt32(0)));
break;
}
case kMips64StoreToStackSlot: {
- __ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ __ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
break;
}
case kMips64StoreWriteBarrier: {
@@ -927,7 +928,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
break;
}
-}
+} // NOLINT(readability/fn_size)
#define UNSUPPORTED_COND(opcode, condition) \
@@ -1054,22 +1055,33 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
cc = FlagsConditionToConditionCmp(condition);
__ Branch(USE_DELAY_SLOT, &done, cc, left, right);
__ li(result, Operand(1)); // In delay slot.
- } else if (instr->arch_opcode() == kMips64CmpD) {
+ } else if (instr->arch_opcode() == kMips64CmpD ||
+ instr->arch_opcode() == kMips64CmpS) {
FPURegister left = i.InputDoubleRegister(0);
FPURegister right = i.InputDoubleRegister(1);
bool predicate;
- FPUCondition cc = FlagsConditionToConditionCmpD(predicate, condition);
+ FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
if (kArchVariant != kMips64r6) {
__ li(result, Operand(1));
- __ c(cc, D, left, right);
+ if (instr->arch_opcode() == kMips64CmpD) {
+ __ c(cc, D, left, right);
+ } else {
+ DCHECK(instr->arch_opcode() == kMips64CmpS);
+ __ c(cc, S, left, right);
+ }
if (predicate) {
__ Movf(result, zero_reg);
} else {
__ Movt(result, zero_reg);
}
} else {
- __ cmp(cc, L, kDoubleCompareReg, left, right);
+ if (instr->arch_opcode() == kMips64CmpD) {
+ __ cmp(cc, L, kDoubleCompareReg, left, right);
+ } else {
+ DCHECK(instr->arch_opcode() == kMips64CmpS);
+ __ cmp(cc, W, kDoubleCompareReg, left, right);
+ }
__ dmfc1(at, kDoubleCompareReg);
__ dsrl32(result, at, 31); // Cmp returns all 1s for true.
if (!predicate) // Toggle result for not equal.
@@ -1136,37 +1148,19 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ Push(ra, fp);
__ mov(fp, sp);
-
- const RegList saves = descriptor->CalleeSavedRegisters();
- // Save callee-saved registers.
- __ MultiPush(saves);
- // kNumCalleeSaved includes the fp register, but the fp register
- // is saved separately in TF.
- DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
- int register_save_area_size = kNumCalleeSaved * kPointerSize;
-
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
- // Save callee-saved FPU registers.
- __ MultiPushFPU(saves_fpu);
- DCHECK(kNumCalleeSavedFPU == base::bits::CountPopulation32(saves_fpu));
- register_save_area_size += kNumCalleeSavedFPU * kDoubleSize * kPointerSize;
-
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
} else if (descriptor->IsJSFunctionCall()) {
CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
} else if (needs_frame_) {
__ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(0);
}
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1179,55 +1173,68 @@ void CodeGenerator::AssemblePrologue() {
osr_pc_offset_ = __ pc_offset();
// TODO(titzer): cannot address target function == local #-1
__ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
- stack_slots -= frame()->GetOsrStackSlotCount();
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
- if (stack_slots > 0) {
- __ Dsubu(sp, sp, Operand(stack_slots * kPointerSize));
+ if (stack_shrink_slots > 0) {
+ __ Dsubu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ }
+
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(saves_fpu);
+ int count = base::bits::CountPopulation32(saves_fpu);
+ DCHECK(kNumCalleeSavedFPU == count);
+ frame()->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kPointerSize));
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // Save callee-saved registers.
+ __ MultiPush(saves);
+ // kNumCalleeSaved includes the fp register, but the fp register
+ // is saved separately in TF.
+ int count = base::bits::CountPopulation32(saves);
+ DCHECK(kNumCalleeSaved == count + 1);
+ frame()->AllocateSavedCalleeRegisterSlots(count);
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- if (stack_slots > 0) {
- __ Daddu(sp, sp, Operand(stack_slots * kPointerSize));
- }
- // Restore FPU registers.
- const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
- __ MultiPopFPU(saves_fpu);
- // Restore GP registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
- __ MultiPop(saves);
- }
+ // Restore GP registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore FPU registers.
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ __ MultiPopFPU(saves_fpu);
+ }
+
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ mov(sp, fp);
__ Pop(ra, fp);
- __ Ret();
} else if (descriptor->IsJSFunctionCall() || needs_frame_) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ Branch(&return_label_);
+ return;
} else {
__ bind(&return_label_);
__ mov(sp, fp);
__ Pop(ra, fp);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : (info()->IsStub()
- ? info()->code_stub()->GetStackParameterCount()
- : 0);
- if (pop_count != 0) {
- __ DropAndRet(pop_count);
- } else {
- __ Ret();
- }
}
+ }
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ if (pop_count != 0) {
+ __ DropAndRet(pop_count);
} else {
__ Ret();
}
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index e4d8795f1b..33d6f58c28 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -682,22 +682,22 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
int slot = kCArgSlotCount;
- for (Node* node : buffer.pushed_nodes) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ for (Node* input : buffer.pushed_nodes) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
} else {
- const int32_t push_count = static_cast<int32_t>(buffer.pushed_nodes.size());
+ int push_count = static_cast<int>(descriptor->StackParameterCount());
if (push_count > 0) {
Emit(kMips64StackClaim, g.NoOutput(),
g.TempImmediate(push_count << kPointerSizeLog2));
}
- int32_t slot = push_count - 1;
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
- g.TempImmediate(slot << kPointerSizeLog2));
- slot--;
+ for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
+ if (Node* input = buffer.pushed_nodes[n]) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
+ }
}
}
@@ -791,8 +791,8 @@ void InstructionSelector::VisitTailCall(Node* node) {
g.TempImmediate(push_count << kPointerSizeLog2));
}
int slot = push_count - 1;
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(slot << kPointerSizeLog2));
slot--;
}
diff --git a/deps/v8/src/compiler/mips64/linkage-mips64.cc b/deps/v8/src/compiler/mips64/linkage-mips64.cc
deleted file mode 100644
index acfedb715f..0000000000
--- a/deps/v8/src/compiler/mips64/linkage-mips64.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct MipsLinkageHelperTraits {
- static Register ReturnValueReg() { return v0; }
- static Register ReturnValue2Reg() { return v1; }
- static Register JSCallFunctionReg() { return a1; }
- static Register ContextReg() { return cp; }
- static Register RuntimeCallFunctionReg() { return a1; }
- static Register RuntimeCallArgCountReg() { return a0; }
- static RegList CCalleeSaveRegisters() {
- return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
- s6.bit() | s7.bit();
- }
- static RegList CCalleeSaveFPRegisters() {
- return f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() |
- f30.bit();
- }
- static Register CRegisterParameter(int i) {
- static Register register_parameters[] = {a0, a1, a2, a3, a4, a5, a6, a7};
- return register_parameters[i];
- }
- static int CRegisterParametersLength() { return 8; }
- static int CStackBackingStoreLength() { return 0; }
-};
-
-
-typedef LinkageHelper<MipsLinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
- int parameter_count,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Zone* zone, Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties, MachineType return_type) {
- return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties,
- return_type);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- const MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index b869185e60..7c2bbe06b8 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -59,6 +59,17 @@ void MoveOptimizer::Run() {
}
for (auto block : code()->instruction_blocks()) {
if (block->PredecessorCount() <= 1) continue;
+ bool has_only_deferred = true;
+ for (RpoNumber pred_id : block->predecessors()) {
+ if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) {
+ has_only_deferred = false;
+ break;
+ }
+ }
+ // This would pull down common moves. If the moves occur in deferred blocks,
+ // and the closest common successor is not deferred, we lose the
+ // optimization of just spilling/filling in deferred blocks.
+ if (has_only_deferred) continue;
OptimizeMerge(block);
}
for (auto gap : to_finalize_) {
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 6557635a2e..e6c9f23fdc 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -7,7 +7,7 @@
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/types-inl.h"
+#include "src/types.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -210,7 +210,6 @@ class Node final {
uint32_t bit_field_;
int input_index() const { return InputIndexField::decode(bit_field_); }
- int output_index() const { return OutputIndexField::decode(bit_field_); }
bool is_inline_use() const { return InlineField::decode(bit_field_); }
Node** input_ptr() {
int index = input_index();
@@ -229,7 +228,8 @@ class Node final {
typedef BitField<bool, 0, 1> InlineField;
typedef BitField<unsigned, 1, 17> InputIndexField;
- typedef BitField<unsigned, 17, 14> OutputIndexField;
+ // Leaving some space in the bitset in case we ever decide to record
+ // the output index.
};
//============================================================================
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index 6c4c49de79..663cf57808 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -328,8 +328,6 @@ void OsrHelper::SetupFrame(Frame* frame) {
// The optimized frame will subsume the unoptimized frame. Do so by reserving
// the first spill slots.
frame->ReserveSpillSlots(UnoptimizedFrameSlots());
- // The frame needs to be adjusted by the number of unoptimized frame slots.
- frame->SetOsrStackSlotCount(static_cast<int>(UnoptimizedFrameSlots()));
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index 5e8d1272cf..ba705ba1d8 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -52,7 +52,7 @@ PipelineStatistics::PipelineStatistics(CompilationInfo* info,
phase_name_(NULL) {
if (info->has_shared_info()) {
source_size_ = static_cast<size_t>(info->shared_info()->SourceSize());
- SmartArrayPointer<char> name =
+ base::SmartArrayPointer<char> name =
info->shared_info()->DebugName()->ToCString();
function_name_ = name.get();
}
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index 01cc9de9d1..988327d1bb 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -36,7 +36,7 @@ class PipelineStatistics : public Malloced {
void End(PipelineStatistics* pipeline_stats,
CompilationStatistics::BasicStats* diff);
- SmartPointer<ZonePool::StatsScope> scope_;
+ base::SmartPointer<ZonePool::StatsScope> scope_;
base::ElapsedTimer timer_;
size_t outer_zone_initial_size_;
size_t allocated_bytes_at_start_;
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 55455690dd..964d77fe13 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -25,12 +25,14 @@
#include "src/compiler/instruction.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-context-relaxation.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-frame-specialization.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-inlining.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-type-feedback.h"
+#include "src/compiler/js-type-feedback-lowering.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/jump-threading.h"
#include "src/compiler/load-elimination.h"
@@ -40,6 +42,7 @@
#include "src/compiler/move-optimizer.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
+#include "src/compiler/preprocess-live-ranges.h"
#include "src/compiler/register-allocator.h"
#include "src/compiler/register-allocator-verifier.h"
#include "src/compiler/schedule.h"
@@ -256,10 +259,18 @@ class PipelineData {
}
void InitializeRegisterAllocationData(const RegisterConfiguration* config,
+ CallDescriptor* descriptor,
const char* debug_name) {
DCHECK(frame_ == nullptr);
DCHECK(register_allocation_data_ == nullptr);
- frame_ = new (instruction_zone()) Frame();
+ int fixed_frame_size = 0;
+ if (descriptor != nullptr) {
+ fixed_frame_size = (descriptor->kind() == CallDescriptor::kCallAddress)
+ ? StandardFrameConstants::kFixedSlotCountAboveFp +
+ StandardFrameConstants::kCPSlotCount
+ : StandardFrameConstants::kFixedSlotCount;
+ }
+ frame_ = new (instruction_zone()) Frame(fixed_frame_size);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
sequence(), debug_name);
@@ -280,7 +291,7 @@ class PipelineData {
Zone* graph_zone_;
Graph* graph_;
// TODO(dcarney): make this into a ZoneObject.
- SmartPointer<SourcePositionTable> source_positions_;
+ base::SmartPointer<SourcePositionTable> source_positions_;
LoopAssignmentAnalysis* loop_assignment_;
MachineOperatorBuilder* machine_;
CommonOperatorBuilder* common_;
@@ -339,17 +350,17 @@ void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
}
-SmartArrayPointer<char> GetDebugName(CompilationInfo* info) {
+base::SmartArrayPointer<char> GetDebugName(CompilationInfo* info) {
if (info->code_stub() != NULL) {
CodeStub::Major major_key = info->code_stub()->MajorKey();
const char* major_name = CodeStub::MajorName(major_key, false);
size_t len = strlen(major_name) + 1;
- SmartArrayPointer<char> name(new char[len]);
+ base::SmartArrayPointer<char> name(new char[len]);
memcpy(name.get(), major_name, len);
return name;
} else {
AllowHandleDereference allow_deref;
- return info->function()->debug_name()->ToCString();
+ return info->literal()->debug_name()->ToCString();
}
}
@@ -497,7 +508,9 @@ struct InliningPhase {
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
JSContextSpecialization context_specialization(
- &graph_reducer, data->jsgraph(), data->info()->context());
+ &graph_reducer, data->jsgraph(), data->info()->is_context_specializing()
+ ? data->info()->context()
+ : MaybeHandle<Context>());
JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
data->jsgraph());
JSInliner inliner(&graph_reducer, data->info()->is_inlining_enabled()
@@ -509,9 +522,7 @@ struct InliningPhase {
if (data->info()->is_frame_specializing()) {
AddReducer(data, &graph_reducer, &frame_specialization);
}
- if (data->info()->is_context_specializing()) {
- AddReducer(data, &graph_reducer, &context_specialization);
- }
+ AddReducer(data, &graph_reducer, &context_specialization);
AddReducer(data, &graph_reducer, &inliner);
graph_reducer.ReduceGraph();
}
@@ -577,6 +588,11 @@ struct TypedLoweringPhase {
LoadElimination load_elimination(&graph_reducer);
JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(), temp_zone);
+ JSTypeFeedbackLowering type_feedback_lowering(
+ &graph_reducer, data->info()->is_deoptimization_enabled()
+ ? JSTypeFeedbackLowering::kDeoptimizationEnabled
+ : JSTypeFeedbackLowering::kNoFlags,
+ data->jsgraph());
JSIntrinsicLowering intrinsic_lowering(
&graph_reducer, data->jsgraph(),
data->info()->is_deoptimization_enabled()
@@ -588,6 +604,7 @@ struct TypedLoweringPhase {
AddReducer(data, &graph_reducer, &builtin_reducer);
AddReducer(data, &graph_reducer, &typed_lowering);
AddReducer(data, &graph_reducer, &intrinsic_lowering);
+ AddReducer(data, &graph_reducer, &type_feedback_lowering);
AddReducer(data, &graph_reducer, &load_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
@@ -697,6 +714,7 @@ struct GenericLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ JSContextRelaxation context_relaxing;
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -706,6 +724,7 @@ struct GenericLoweringPhase {
SelectLowering select_lowering(data->jsgraph()->graph(),
data->jsgraph()->common());
TailCallOptimization tco(data->common(), data->graph());
+ AddReducer(data, &graph_reducer, &context_relaxing);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
AddReducer(data, &graph_reducer, &generic_lowering);
@@ -775,6 +794,17 @@ struct BuildLiveRangesPhase {
};
+struct PreprocessLiveRangesPhase {
+ static const char* phase_name() { return "preprocess live ranges"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ PreprocessLiveRanges live_range_preprocessor(
+ data->register_allocation_data(), temp_zone);
+ live_range_preprocessor.PreprocessRanges();
+ }
+};
+
+
template <typename RegAllocator>
struct AllocateGeneralRegistersPhase {
static const char* phase_name() { return "allocate general registers"; }
@@ -973,7 +1003,7 @@ Handle<Code> Pipeline::GenerateCode() {
}
ZonePool zone_pool;
- SmartPointer<PipelineStatistics> pipeline_statistics;
+ base::SmartPointer<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats) {
pipeline_statistics.Reset(new PipelineStatistics(info(), &zone_pool));
@@ -985,8 +1015,8 @@ Handle<Code> Pipeline::GenerateCode() {
if (json_file != nullptr) {
OFStream json_of(json_file);
Handle<Script> script = info()->script();
- FunctionLiteral* function = info()->function();
- SmartArrayPointer<char> function_name =
+ FunctionLiteral* function = info()->literal();
+ base::SmartArrayPointer<char> function_name =
info()->shared_info()->DebugName()->ToCString();
int pos = info()->shared_info()->start_position();
json_of << "{\"function\":\"" << function_name.get()
@@ -1054,10 +1084,7 @@ Handle<Code> Pipeline::GenerateCode() {
GraphReplayPrinter::PrintReplay(data.graph());
}
- // Bailout here in case target architecture is not supported.
- if (!SupportedTarget()) return Handle<Code>::null();
-
- SmartPointer<Typer> typer;
+ base::SmartPointer<Typer> typer;
if (info()->is_typing_enabled()) {
// Type the graph.
typer.Reset(new Typer(isolate(), data.graph(), info()->function_type()));
@@ -1145,7 +1172,7 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
// Construct a pipeline for scheduling and code generation.
ZonePool zone_pool;
PipelineData data(&zone_pool, info, graph, schedule);
- SmartPointer<PipelineStatistics> pipeline_statistics;
+ base::SmartPointer<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats) {
pipeline_statistics.Reset(new PipelineStatistics(info, &zone_pool));
pipeline_statistics->BeginPhaseKind("test codegen");
@@ -1171,7 +1198,7 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
PipelineData data(&zone_pool, &info, sequence);
Pipeline pipeline(&info);
pipeline.data_ = &data;
- pipeline.AllocateRegisters(config, run_verifier);
+ pipeline.AllocateRegisters(config, nullptr, run_verifier);
return !data.compilation_failed();
}
@@ -1181,7 +1208,6 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
PipelineData* data = this->data_;
DCHECK_NOT_NULL(data->graph());
- CHECK(SupportedBackend());
if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
TraceSchedule(data->info(), data->schedule());
@@ -1216,7 +1242,8 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
bool run_verifier = FLAG_turbo_verify_allocation;
// Allocate registers.
- AllocateRegisters(RegisterConfiguration::ArchDefault(), run_verifier);
+ AllocateRegisters(RegisterConfiguration::ArchDefault(), call_descriptor,
+ run_verifier);
if (data->compilation_failed()) {
info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
return Handle<Code>();
@@ -1275,11 +1302,12 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
+ CallDescriptor* descriptor,
bool run_verifier) {
PipelineData* data = this->data_;
// Don't track usage for this zone in compiler stats.
- SmartPointer<Zone> verifier_zone;
+ base::SmartPointer<Zone> verifier_zone;
RegisterAllocatorVerifier* verifier = nullptr;
if (run_verifier) {
verifier_zone.Reset(new Zone());
@@ -1287,12 +1315,12 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
verifier_zone.get(), config, data->sequence());
}
- SmartArrayPointer<char> debug_name;
+ base::SmartArrayPointer<char> debug_name;
#ifdef DEBUG
debug_name = GetDebugName(data->info());
#endif
- data->InitializeRegisterAllocationData(config, debug_name.get());
+ data->InitializeRegisterAllocationData(config, descriptor, debug_name.get());
if (info()->is_osr()) {
OsrHelper osr_helper(info());
osr_helper.SetupFrame(data->frame());
@@ -1310,14 +1338,15 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
if (verifier != nullptr) {
CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
}
- if (FLAG_turbo_greedy_regalloc) {
- Run<AllocateGeneralRegistersPhase<GreedyAllocator>>();
- Run<AllocateDoubleRegistersPhase<GreedyAllocator>>();
- } else {
- Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
- Run<AllocateDoubleRegistersPhase<LinearScanAllocator>>();
+
+ if (FLAG_turbo_preprocess_ranges) {
+ Run<PreprocessLiveRangesPhase>();
}
+ // TODO(mtrofin): re-enable greedy once we have bots for range preprocessing.
+ Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
+ Run<AllocateDoubleRegistersPhase<LinearScanAllocator>>();
+
if (FLAG_turbo_frame_elision) {
Run<LocateSpillSlotsPhase>();
Run<FrameElisionPhase>();
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 240ff69f59..ea8b7e9b4b 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -46,9 +46,6 @@ class Pipeline {
InstructionSequence* sequence,
bool run_verifier);
- static inline bool SupportedBackend() { return V8_TURBOFAN_BACKEND != 0; }
- static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; }
-
private:
static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
CallDescriptor* call_descriptor,
@@ -70,7 +67,7 @@ class Pipeline {
void RunPrintAndVerify(const char* phase, bool untyped = false);
Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
void AllocateRegisters(const RegisterConfiguration* config,
- bool run_verifier);
+ CallDescriptor* descriptor, bool run_verifier);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/ppc/OWNERS b/deps/v8/src/compiler/ppc/OWNERS
index a04d29a94f..eb007cb908 100644
--- a/deps/v8/src/compiler/ppc/OWNERS
+++ b/deps/v8/src/compiler/ppc/OWNERS
@@ -1,3 +1,4 @@
+jyan@ca.ibm.com
dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index cdc1424cc5..2acea2f1d5 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -7,6 +7,7 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
#include "src/ppc/macro-assembler-ppc.h"
#include "src/scopes.h"
@@ -99,12 +100,9 @@ class PPCOperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK(op != NULL);
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(
- AllocatedOperand::cast(op)->index(), frame(), 0);
+ FrameOffset offset =
+ linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -975,17 +973,31 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
#endif
case kPPC_Push:
- __ Push(i.InputRegister(0));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ } else {
+ __ Push(i.InputRegister(0));
+ }
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_PushFrame: {
int num_slots = i.InputInt32(1);
- __ StorePU(i.InputRegister(0), MemOperand(sp, -num_slots * kPointerSize));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ stfdu(i.InputDoubleRegister(0),
+ MemOperand(sp, -num_slots * kPointerSize));
+ } else {
+ __ StorePU(i.InputRegister(0),
+ MemOperand(sp, -num_slots * kPointerSize));
+ }
break;
}
case kPPC_StoreToStackSlot: {
int slot = i.InputInt32(1);
- __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ stfd(i.InputDoubleRegister(0), MemOperand(sp, slot * kPointerSize));
+ } else {
+ __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+ }
break;
}
case kPPC_ExtendSignWord8:
@@ -1293,41 +1305,28 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
+
if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ function_descriptor();
- int register_save_area_size = 0;
- RegList frame_saves = fp.bit();
__ mflr(r0);
if (FLAG_enable_embedded_constant_pool) {
__ Push(r0, fp, kConstantPoolRegister);
// Adjust FP to point to saved FP.
__ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
- register_save_area_size += kPointerSize;
- frame_saves |= kConstantPoolRegister.bit();
} else {
__ Push(r0, fp);
__ mr(fp, sp);
}
- // Save callee-saved registers.
- const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- register_save_area_size += kPointerSize;
- }
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
- __ MultiPush(saves);
} else if (descriptor->IsJSFunctionCall()) {
CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
} else if (needs_frame_) {
__ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(0);
}
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1340,54 +1339,76 @@ void CodeGenerator::AssemblePrologue() {
osr_pc_offset_ = __ pc_offset();
// TODO(titzer): cannot address target function == local #-1
__ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
- stack_slots -= frame()->GetOsrStackSlotCount();
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
+
+ const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ if (double_saves != 0) {
+ stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+ }
+ if (stack_shrink_slots > 0) {
+ __ Add(sp, sp, -stack_shrink_slots * kPointerSize, r0);
}
- if (stack_slots > 0) {
- __ Add(sp, sp, -stack_slots * kPointerSize, r0);
+ // Save callee-saved Double registers.
+ if (double_saves != 0) {
+ __ MultiPushDoubles(double_saves);
+ DCHECK(kNumCalleeSavedDoubles ==
+ base::bits::CountPopulation32(double_saves));
+ frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
+ (kDoubleSize / kPointerSize));
+ }
+
+ // Save callee-saved registers.
+ const RegList saves =
+ FLAG_enable_embedded_constant_pool
+ ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
+ : descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPush(saves);
+ // register save area does not include the fp or constant pool pointer.
+ const int num_saves =
+ kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
+ DCHECK(num_saves == base::bits::CountPopulation32(saves));
+ frame()->AllocateSavedCalleeRegisterSlots(num_saves);
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+
+ // Restore registers.
+ const RegList saves =
+ FLAG_enable_embedded_constant_pool
+ ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
+ : descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore double registers.
+ const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ if (double_saves != 0) {
+ __ MultiPopDoubles(double_saves);
+ }
+
if (descriptor->kind() == CallDescriptor::kCallAddress) {
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- if (stack_slots > 0) {
- __ Add(sp, sp, stack_slots * kPointerSize, r0);
- }
- // Restore registers.
- RegList frame_saves = fp.bit();
- if (FLAG_enable_embedded_constant_pool) {
- frame_saves |= kConstantPoolRegister.bit();
- }
- const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
- if (saves != 0) {
- __ MultiPop(saves);
- }
- }
- __ LeaveFrame(StackFrame::MANUAL);
- __ Ret();
+ __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
} else if (descriptor->IsJSFunctionCall() || needs_frame_) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ b(&return_label_);
+ return;
} else {
__ bind(&return_label_);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : (info()->IsStub()
- ? info()->code_stub()->GetStackParameterCount()
- : 0);
__ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
- __ Ret();
}
} else {
- __ Ret();
+ __ Drop(pop_count);
}
+ __ Ret();
}
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 0fe2acb369..197dcc13ea 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -6,6 +6,7 @@
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/ppc/frames-ppc.h"
namespace v8 {
namespace internal {
@@ -1473,15 +1474,19 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
}
} else {
// Push any stack arguments.
- int num_slots = buffer.pushed_nodes.size();
+ int num_slots = static_cast<int>(descriptor->StackParameterCount());
int slot = 0;
- for (Node* node : buffer.pushed_nodes) {
+ for (Node* input : buffer.pushed_nodes) {
if (slot == 0) {
- Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(node),
+ DCHECK(input);
+ Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(input),
g.TempImmediate(num_slots));
} else {
- Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
- g.TempImmediate(slot));
+ // Skip any alignment holes in pushed nodes.
+ if (input) {
+ Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ g.TempImmediate(slot));
+ }
}
++slot;
}
@@ -1577,8 +1582,17 @@ void InstructionSelector::VisitTailCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, false);
// Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kPPC_Push, g.NoOutput(), g.UseRegister(node));
+ int num_slots = static_cast<int>(descriptor->StackParameterCount());
+ int slot = 0;
+ for (Node* input : buffer.pushed_nodes) {
+ if (slot == 0) {
+ Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(input),
+ g.TempImmediate(num_slots));
+ } else {
+ Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ g.TempImmediate(slot));
+ }
+ ++slot;
}
// Select the appropriate opcode based on the call type.
diff --git a/deps/v8/src/compiler/ppc/linkage-ppc.cc b/deps/v8/src/compiler/ppc/linkage-ppc.cc
deleted file mode 100644
index 677e9d0e6c..0000000000
--- a/deps/v8/src/compiler/ppc/linkage-ppc.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct PPCLinkageHelperTraits {
- static Register ReturnValueReg() { return r3; }
- static Register ReturnValue2Reg() { return r4; }
- static Register JSCallFunctionReg() { return r4; }
- static Register ContextReg() { return cp; }
- static Register RuntimeCallFunctionReg() { return r4; }
- static Register RuntimeCallArgCountReg() { return r3; }
- static RegList CCalleeSaveRegisters() {
- return r14.bit() | r15.bit() | r16.bit() | r17.bit() | r18.bit() |
- r19.bit() | r20.bit() | r21.bit() | r22.bit() | r23.bit() |
- r24.bit() | r25.bit() | r26.bit() | r27.bit() | r28.bit() |
- r29.bit() | r30.bit() | fp.bit();
- }
- static RegList CCalleeSaveFPRegisters() { return 0; }
- static Register CRegisterParameter(int i) {
- static Register register_parameters[] = {r3, r4, r5, r6, r7, r8, r9, r10};
- return register_parameters[i];
- }
- static int CRegisterParametersLength() { return 8; }
- static int CStackBackingStoreLength() { return 0; }
-};
-
-
-typedef LinkageHelper<PPCLinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
- int parameter_count,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Zone* zone, Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties, MachineType return_type) {
- return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties,
- return_type);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- const MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/preprocess-live-ranges.cc b/deps/v8/src/compiler/preprocess-live-ranges.cc
new file mode 100644
index 0000000000..fee3a3d98c
--- /dev/null
+++ b/deps/v8/src/compiler/preprocess-live-ranges.cc
@@ -0,0 +1,169 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/preprocess-live-ranges.h"
+#include "src/compiler/register-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+ } while (false)
+
+
+namespace {
+
+LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
+ LifetimePosition pos) {
+ DCHECK(range->Start() < pos && pos < range->End());
+ DCHECK(pos.IsStart() || pos.IsGapPosition() ||
+ (data->code()
+ ->GetInstructionBlock(pos.ToInstructionIndex())
+ ->last_instruction_index() != pos.ToInstructionIndex()));
+ LiveRange* result = data->NewChildRangeFor(range);
+ range->SplitAt(pos, result, data->allocation_zone());
+ TRACE("Split range %d(v%d) @%d => %d.\n", range->id(),
+ range->TopLevel()->id(), pos.ToInstructionIndex(), result->id());
+ return result;
+}
+
+
+LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
+ int instruction_index) {
+ LifetimePosition ret = LifetimePosition::Invalid();
+
+ ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
+ if (range->Start() >= ret || ret >= range->End()) {
+ return LifetimePosition::Invalid();
+ }
+ return ret;
+}
+
+
+LiveRange* SplitRangeAfterBlock(LiveRange* range, RegisterAllocationData* data,
+ const InstructionBlock* block) {
+ const InstructionSequence* code = data->code();
+ int last_index = block->last_instruction_index();
+ int outside_index = static_cast<int>(code->instructions().size());
+ bool has_handler = false;
+ for (auto successor_id : block->successors()) {
+ const InstructionBlock* successor = code->InstructionBlockAt(successor_id);
+ if (successor->IsHandler()) {
+ has_handler = true;
+ }
+ outside_index = Min(outside_index, successor->first_instruction_index());
+ }
+ int split_at = has_handler ? outside_index : last_index;
+ LifetimePosition after_block =
+ GetSplitPositionForInstruction(range, split_at);
+
+ if (after_block.IsValid()) {
+ return Split(range, data, after_block);
+ }
+
+ return range;
+}
+
+
+int GetFirstInstructionIndex(const UseInterval* interval) {
+ int ret = interval->start().ToInstructionIndex();
+ if (!interval->start().IsGapPosition() && !interval->start().IsStart()) {
+ ++ret;
+ }
+ return ret;
+}
+
+
+bool DoesSubsequenceClobber(const InstructionSequence* code, int start,
+ int end) {
+ for (int i = start; i <= end; ++i) {
+ if (code->InstructionAt(i)->IsCall()) return true;
+ }
+ return false;
+}
+
+
+void SplitRangeAtDeferredBlocksWithCalls(LiveRange* range,
+ RegisterAllocationData* data) {
+ DCHECK(!range->IsFixed());
+ DCHECK(!range->spilled());
+ if (range->TopLevel()->HasSpillOperand()) {
+ TRACE(
+ "Skipping deferred block analysis for live range %d because it has a "
+ "spill operand.\n",
+ range->TopLevel()->id());
+ return;
+ }
+
+ const InstructionSequence* code = data->code();
+ LiveRange* current_subrange = range;
+
+ UseInterval* interval = current_subrange->first_interval();
+
+ while (interval != nullptr) {
+ int first_index = GetFirstInstructionIndex(interval);
+ int last_index = interval->end().ToInstructionIndex();
+
+ if (last_index > code->LastInstructionIndex()) {
+ last_index = code->LastInstructionIndex();
+ }
+
+ interval = interval->next();
+
+ for (int index = first_index; index <= last_index;) {
+ const InstructionBlock* block = code->GetInstructionBlock(index);
+ int last_block_index = static_cast<int>(block->last_instruction_index());
+ int last_covered_index = Min(last_index, last_block_index);
+ int working_index = index;
+ index = block->last_instruction_index() + 1;
+
+ if (!block->IsDeferred() ||
+ !DoesSubsequenceClobber(code, working_index, last_covered_index)) {
+ continue;
+ }
+
+ TRACE("Deferred block B%d clobbers range %d(v%d).\n",
+ block->rpo_number().ToInt(), current_subrange->id(),
+ current_subrange->TopLevel()->id());
+ LifetimePosition block_start =
+ GetSplitPositionForInstruction(current_subrange, working_index);
+ LiveRange* block_and_after = nullptr;
+ if (block_start.IsValid()) {
+ block_and_after = Split(current_subrange, data, block_start);
+ } else {
+ block_and_after = current_subrange;
+ }
+ LiveRange* next = SplitRangeAfterBlock(block_and_after, data, block);
+ if (next != current_subrange) interval = next->first_interval();
+ current_subrange = next;
+ break;
+ }
+ }
+}
+}
+
+
+void PreprocessLiveRanges::PreprocessRanges() {
+ SplitRangesAroundDeferredBlocks();
+}
+
+
+void PreprocessLiveRanges::SplitRangesAroundDeferredBlocks() {
+ size_t live_range_count = data()->live_ranges().size();
+ for (size_t i = 0; i < live_range_count; i++) {
+ LiveRange* range = data()->live_ranges()[i];
+ if (range != nullptr && !range->IsEmpty() && !range->spilled() &&
+ !range->IsFixed() && !range->IsChild()) {
+ SplitRangeAtDeferredBlocksWithCalls(range, data());
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/preprocess-live-ranges.h b/deps/v8/src/compiler/preprocess-live-ranges.h
new file mode 100644
index 0000000000..aa852fc7ca
--- /dev/null
+++ b/deps/v8/src/compiler/preprocess-live-ranges.h
@@ -0,0 +1,35 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PREPROCESS_LIVE_RANGES_H_
+#define V8_PREPROCESS_LIVE_RANGES_H_
+
+#include "src/compiler/register-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+class PreprocessLiveRanges final {
+ public:
+ PreprocessLiveRanges(RegisterAllocationData* data, Zone* zone)
+ : data_(data), zone_(zone) {}
+ void PreprocessRanges();
+
+ private:
+ void SplitRangesAroundDeferredBlocks();
+
+ RegisterAllocationData* data() { return data_; }
+ Zone* zone() { return zone_; }
+
+ RegisterAllocationData* data_;
+ Zone* zone_;
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+#endif // V8_PREPROCESS_LIVE_RANGES_H_
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 3e87ef5d97..8013f422f6 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/raw-machine-assembler.h"
+
#include "src/code-factory.h"
#include "src/compiler/pipeline.h"
-#include "src/compiler/raw-machine-assembler.h"
#include "src/compiler/scheduler.h"
namespace v8 {
@@ -12,17 +13,16 @@ namespace internal {
namespace compiler {
RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
- const MachineSignature* machine_sig,
+ CallDescriptor* call_descriptor,
MachineType word,
MachineOperatorBuilder::Flags flags)
- : GraphBuilder(isolate, graph),
+ : isolate_(isolate),
+ graph_(graph),
schedule_(new (zone()) Schedule(zone())),
machine_(zone(), word, flags),
common_(zone()),
- machine_sig_(machine_sig),
- call_descriptor_(
- Linkage::GetSimplifiedCDescriptor(graph->zone(), machine_sig)),
- parameters_(NULL),
+ call_descriptor_(call_descriptor),
+ parameters_(nullptr),
current_block_(schedule()->start()) {
int param_count = static_cast<int>(parameter_count());
Node* s = graph->NewNode(common_.Start(param_count));
@@ -40,9 +40,9 @@ Schedule* RawMachineAssembler::Export() {
// Compute the correct codegen order.
DCHECK(schedule_->rpo_order()->empty());
Scheduler::ComputeSpecialRPO(zone(), schedule_);
- // Invalidate MachineAssembler.
+ // Invalidate RawMachineAssembler.
Schedule* schedule = schedule_;
- schedule_ = NULL;
+ schedule_ = nullptr;
return schedule;
}
@@ -56,7 +56,7 @@ Node* RawMachineAssembler::Parameter(size_t index) {
void RawMachineAssembler::Goto(Label* label) {
DCHECK(current_block_ != schedule()->end());
schedule()->AddGoto(CurrentBlock(), Use(label));
- current_block_ = NULL;
+ current_block_ = nullptr;
}
@@ -65,7 +65,7 @@ void RawMachineAssembler::Branch(Node* condition, Label* true_val,
DCHECK(current_block_ != schedule()->end());
Node* branch = NewNode(common()->Branch(), condition);
schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
- current_block_ = NULL;
+ current_block_ = nullptr;
}
@@ -94,9 +94,25 @@ void RawMachineAssembler::Switch(Node* index, Label* default_label,
void RawMachineAssembler::Return(Node* value) {
- Node* ret = NewNode(common()->Return(), value);
+ Node* ret = graph()->NewNode(common()->Return(), value);
schedule()->AddReturn(CurrentBlock(), ret);
- current_block_ = NULL;
+ current_block_ = nullptr;
+}
+
+
+Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
+ Node** args) {
+ int param_count =
+ static_cast<int>(desc->GetMachineSignature()->parameter_count());
+ Node** buffer = zone()->NewArray<Node*>(param_count + 1);
+ int index = 0;
+ buffer[index++] = function;
+ for (int i = 0; i < param_count; i++) {
+ buffer[index++] = args[i];
+ }
+ Node* call = graph()->NewNode(common()->Call(desc), param_count + 1, buffer);
+ schedule()->AddNode(CurrentBlock(), call);
+ return call;
}
@@ -216,8 +232,19 @@ Node* RawMachineAssembler::CallCFunction8(
}
+Node* RawMachineAssembler::TailCallInterpreterDispatch(
+ const CallDescriptor* call_descriptor, Node* target, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5) {
+ Node* tail_call =
+ graph()->NewNode(common()->TailCall(call_descriptor), target, arg1, arg2,
+ arg3, arg4, arg5, graph()->start(), graph()->start());
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ return tail_call;
+}
+
+
void RawMachineAssembler::Bind(Label* label) {
- DCHECK(current_block_ == NULL);
+ DCHECK(current_block_ == nullptr);
DCHECK(!label->bound_);
label->bound_ = true;
current_block_ = EnsureBlock(label);
@@ -231,7 +258,7 @@ BasicBlock* RawMachineAssembler::Use(Label* label) {
BasicBlock* RawMachineAssembler::EnsureBlock(Label* label) {
- if (label->block_ == NULL) label->block_ = schedule()->NewBasicBlock();
+ if (label->block_ == nullptr) label->block_ = schedule()->NewBasicBlock();
return label->block_;
}
@@ -243,15 +270,11 @@ BasicBlock* RawMachineAssembler::CurrentBlock() {
Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
- Node** inputs, bool incomplete) {
- DCHECK(ScheduleValid());
- DCHECK(current_block_ != NULL);
- Node* node = graph()->NewNode(op, input_count, inputs, incomplete);
- BasicBlock* block = op->opcode() == IrOpcode::kParameter ? schedule()->start()
- : CurrentBlock();
- if (op->opcode() != IrOpcode::kReturn) {
- schedule()->AddNode(block, node);
- }
+ Node** inputs) {
+ DCHECK_NOT_NULL(schedule_);
+ DCHECK(current_block_ != nullptr);
+ Node* node = graph()->NewNode(op, input_count, inputs);
+ schedule()->AddNode(CurrentBlock(), node);
return node;
}
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index bc28e6c817..05f4ebab02 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -5,14 +5,14 @@
#ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
+#include "src/assembler.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-builder.h"
+#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
-
namespace v8 {
namespace internal {
namespace compiler {
@@ -20,21 +20,21 @@ namespace compiler {
class BasicBlock;
class Schedule;
-
-class RawMachineAssembler : public GraphBuilder {
+// The RawMachineAssembler produces a low-level IR graph. All nodes are wired
+// into a graph and also placed into a schedule immediately, hence subsequent
+// code generation can happen without the need for scheduling.
+//
+// In order to create a schedule on-the-fly, the assembler keeps track of basic
+// blocks by having one current basic block being populated and by referencing
+// other basic blocks through the use of labels.
+class RawMachineAssembler {
public:
class Label {
public:
Label() : block_(NULL), used_(false), bound_(false) {}
~Label() { DCHECK(bound_ || !used_); }
- BasicBlock* block() { return block_; }
-
private:
- // Private constructor for exit label.
- explicit Label(BasicBlock* block)
- : block_(block), used_(false), bound_(false) {}
-
BasicBlock* block_;
bool used_;
bool bound_;
@@ -43,18 +43,33 @@ class RawMachineAssembler : public GraphBuilder {
};
RawMachineAssembler(Isolate* isolate, Graph* graph,
- const MachineSignature* machine_sig,
+ CallDescriptor* call_descriptor,
MachineType word = kMachPtr,
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::Flag::kNoFlags);
- ~RawMachineAssembler() override {}
+ ~RawMachineAssembler() {}
+ Isolate* isolate() const { return isolate_; }
+ Graph* graph() const { return graph_; }
+ Schedule* schedule() { return schedule_; }
Zone* zone() const { return graph()->zone(); }
MachineOperatorBuilder* machine() { return &machine_; }
CommonOperatorBuilder* common() { return &common_; }
CallDescriptor* call_descriptor() const { return call_descriptor_; }
- size_t parameter_count() const { return machine_sig_->parameter_count(); }
- const MachineSignature* machine_sig() const { return machine_sig_; }
+ size_t parameter_count() const { return machine_sig()->parameter_count(); }
+ const MachineSignature* machine_sig() const {
+ return call_descriptor_->GetMachineSignature();
+ }
+ BasicBlock* CurrentBlock();
+
+ // Finalizes the schedule and exports it to be used for code generation. Note
+ // that this RawMachineAssembler becomes invalid after export.
+ Schedule* Export();
+
+ // ===========================================================================
+ // The following utility methods create new nodes with specific operators and
+ // place them into the current basic block. They don't perform control flow,
+ // hence will not switch the current basic block.
Node* UndefinedConstant() {
Unique<HeapObject> unique = Unique<HeapObject>::CreateImmovable(
@@ -90,6 +105,9 @@ class RawMachineAssembler : public GraphBuilder {
Unique<HeapObject> val = Unique<HeapObject>::CreateUninitialized(object);
return NewNode(common()->HeapConstant(val));
}
+ Node* HeapConstant(Unique<HeapObject> object) {
+ return NewNode(common()->HeapConstant(object));
+ }
Node* ExternalConstant(ExternalReference address) {
return NewNode(common()->ExternalConstant(address));
}
@@ -106,13 +124,14 @@ class RawMachineAssembler : public GraphBuilder {
return NewNode(machine()->Load(rep), base, index, graph()->start(),
graph()->start());
}
- void Store(MachineType rep, Node* base, Node* value) {
- Store(rep, base, IntPtrConstant(0), value);
+ Node* Store(MachineType rep, Node* base, Node* value) {
+ return Store(rep, base, IntPtrConstant(0), value);
}
- void Store(MachineType rep, Node* base, Node* index, Node* value) {
- NewNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)), base,
- index, value, graph()->start(), graph()->start());
+ Node* Store(MachineType rep, Node* base, Node* index, Node* value) {
+ return NewNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
+ base, index, value, graph()->start(), graph()->start());
}
+
// Arithmetic Operations.
Node* WordAnd(Node* a, Node* b) {
return NewNode(machine()->WordAnd(), a, b);
@@ -453,37 +472,35 @@ class RawMachineAssembler : public GraphBuilder {
Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
return Load(rep, PointerConstant(address), Int32Constant(offset));
}
- void StoreToPointer(void* address, MachineType rep, Node* node) {
- Store(rep, PointerConstant(address), node);
+ Node* StoreToPointer(void* address, MachineType rep, Node* node) {
+ return Store(rep, PointerConstant(address), node);
}
Node* StringConstant(const char* string) {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
}
- // Control flow.
- void Goto(Label* label);
- void Branch(Node* condition, Label* true_val, Label* false_val);
- void Switch(Node* index, Label* default_label, int32_t* case_values,
- Label** case_labels, size_t case_count);
+ // Call a given call descriptor and the given arguments.
+ Node* CallN(CallDescriptor* desc, Node* function, Node** args);
+
// Call through CallFunctionStub with lazy deopt and frame-state.
Node* CallFunctionStub0(Node* function, Node* receiver, Node* context,
Node* frame_state, CallFunctionFlags flags);
- // Call to a JS function with zero parameters.
+ // Call to a JS function with zero arguments.
Node* CallJS0(Node* function, Node* receiver, Node* context,
Node* frame_state);
- // Call to a runtime function with zero parameters.
+ // Call to a runtime function with zero arguments.
Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context,
Node* frame_state);
- // Call to a C function with zero parameters.
+ // Call to a C function with zero arguments.
Node* CallCFunction0(MachineType return_type, Node* function);
// Call to a C function with one parameter.
Node* CallCFunction1(MachineType return_type, MachineType arg0_type,
Node* function, Node* arg0);
- // Call to a C function with two parameters.
+ // Call to a C function with two arguments.
Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, Node* function, Node* arg0,
Node* arg1);
- // Call to a C function with eight parameters.
+ // Call to a C function with eight arguments.
Node* CallCFunction8(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, MachineType arg2_type,
MachineType arg3_type, MachineType arg4_type,
@@ -491,6 +508,19 @@ class RawMachineAssembler : public GraphBuilder {
MachineType arg7_type, Node* function, Node* arg0,
Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, Node* arg6, Node* arg7);
+ Node* TailCallInterpreterDispatch(const CallDescriptor* call_descriptor,
+ Node* target, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5);
+
+ // ===========================================================================
+ // The following utility methods deal with control flow, hence might switch
+ // the current basic block or create new basic blocks for labels.
+
+ // Control flow.
+ void Goto(Label* label);
+ void Branch(Node* condition, Label* true_val, Label* false_val);
+ void Switch(Node* index, Label* default_label, int32_t* case_values,
+ Label** case_labels, size_t case_count);
void Return(Node* value);
void Bind(Label* label);
void Deoptimize(Node* state);
@@ -506,29 +536,59 @@ class RawMachineAssembler : public GraphBuilder {
return NewNode(common()->Phi(type, 4), n1, n2, n3, n4);
}
- // MachineAssembler is invalid after export.
- Schedule* Export();
+ // ===========================================================================
+ // The following generic node creation methods can be used for operators that
+ // are not covered by the above utility methods. There should rarely be a need
+ // to do that outside of testing though.
+
+ Node* NewNode(const Operator* op) {
+ return MakeNode(op, 0, static_cast<Node**>(NULL));
+ }
+
+ Node* NewNode(const Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
- protected:
- Node* MakeNode(const Operator* op, int input_count, Node** inputs,
- bool incomplete) final;
+ Node* NewNode(const Operator* op, Node* n1, Node* n2) {
+ Node* buffer[] = {n1, n2};
+ return MakeNode(op, arraysize(buffer), buffer);
+ }
- bool ScheduleValid() { return schedule_ != NULL; }
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* buffer[] = {n1, n2, n3};
+ return MakeNode(op, arraysize(buffer), buffer);
+ }
- Schedule* schedule() {
- DCHECK(ScheduleValid());
- return schedule_;
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* buffer[] = {n1, n2, n3, n4};
+ return MakeNode(op, arraysize(buffer), buffer);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5) {
+ Node* buffer[] = {n1, n2, n3, n4, n5};
+ return MakeNode(op, arraysize(buffer), buffer);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+ return MakeNode(op, arraysize(nodes), nodes);
+ }
+
+ Node* NewNode(const Operator* op, int value_input_count,
+ Node** value_inputs) {
+ return MakeNode(op, value_input_count, value_inputs);
}
private:
+ Node* MakeNode(const Operator* op, int input_count, Node** inputs);
BasicBlock* Use(Label* label);
BasicBlock* EnsureBlock(Label* label);
- BasicBlock* CurrentBlock();
+ Isolate* isolate_;
+ Graph* graph_;
Schedule* schedule_;
MachineOperatorBuilder machine_;
CommonOperatorBuilder common_;
- const MachineSignature* machine_sig_;
CallDescriptor* call_descriptor_;
Node** parameters_;
BasicBlock* current_block_;
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index f23d24433f..0b775d29e1 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -155,7 +155,7 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
int vreg = unallocated->virtual_register();
constraint->virtual_register_ = vreg;
if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
- constraint->type_ = kFixedSlot;
+ constraint->type_ = sequence()->IsFloat(vreg) ? kDoubleSlot : kSlot;
constraint->value_ = unallocated->fixed_slot_index();
} else {
switch (unallocated->extended_policy()) {
@@ -185,11 +185,7 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
}
break;
case UnallocatedOperand::MUST_HAVE_SLOT:
- if (sequence()->IsFloat(vreg)) {
- constraint->type_ = kDoubleSlot;
- } else {
- constraint->type_ = kSlot;
- }
+ constraint->type_ = sequence()->IsFloat(vreg) ? kDoubleSlot : kSlot;
break;
case UnallocatedOperand::SAME_AS_FIRST_INPUT:
constraint->type_ = kSameAsFirst;
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 5bf858a86c..101a10ae5b 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -196,8 +196,11 @@ void UsePosition::ResolveHint(UsePosition* use_pos) {
void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
+ DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_));
flags_ = TypeField::encode(type) |
- RegisterBeneficialField::encode(register_beneficial);
+ RegisterBeneficialField::encode(register_beneficial) |
+ HintTypeField::encode(HintTypeField::decode(flags_)) |
+ AssignedRegisterField::encode(kUnassignedRegister);
}
@@ -256,7 +259,8 @@ LiveRange::LiveRange(int id, MachineType machine_type)
last_processed_use_(nullptr),
current_hint_position_(nullptr),
size_(kInvalidSize),
- weight_(kInvalidWeight) {
+ weight_(kInvalidWeight),
+ spilled_in_deferred_block_(false) {
DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type));
bits_ = SpillTypeField::encode(SpillType::kNoSpillType) |
AssignedRegisterField::encode(kUnassignedRegister) |
@@ -319,12 +323,90 @@ void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
}
+bool LiveRange::TryCommitSpillInDeferredBlock(
+ InstructionSequence* code, const InstructionOperand& spill_operand) {
+ DCHECK(!IsChild());
+
+ if (!FLAG_turbo_preprocess_ranges || IsEmpty() || HasNoSpillType() ||
+ spill_operand.IsConstant() || spill_operand.IsImmediate()) {
+ return false;
+ }
+
+ int count = 0;
+ for (const LiveRange* child = this; child != nullptr; child = child->next()) {
+ int first_instr = child->Start().ToInstructionIndex();
+
+ // If the range starts at instruction end, the first instruction index is
+ // the next one.
+ if (!child->Start().IsGapPosition() && !child->Start().IsStart()) {
+ ++first_instr;
+ }
+
+ // We only look at where the range starts. It doesn't matter where it ends:
+ // if it ends past this block, then either there is a phi there already,
+ // or ResolveControlFlow will adapt the last instruction gap of this block
+ // as if there were a phi. In either case, data flow will be correct.
+ const InstructionBlock* block = code->GetInstructionBlock(first_instr);
+
+ // If we have slot uses in a subrange, bail out, because we need the value
+ // on the stack before that use.
+ bool has_slot_use = child->NextSlotPosition(child->Start()) != nullptr;
+ if (!block->IsDeferred()) {
+ if (child->spilled() || has_slot_use) {
+ TRACE(
+ "Live Range %d must be spilled at definition: found a "
+ "slot-requiring non-deferred child range %d.\n",
+ TopLevel()->id(), child->id());
+ return false;
+ }
+ } else {
+ if (child->spilled() || has_slot_use) ++count;
+ }
+ }
+ if (count == 0) return false;
+
+ spill_start_index_ = -1;
+ spilled_in_deferred_block_ = true;
+
+ TRACE("Live Range %d will be spilled only in deferred blocks.\n", id());
+ // If we have ranges that aren't spilled but require the operand on the stack,
+ // make sure we insert the spill.
+ for (const LiveRange* child = this; child != nullptr; child = child->next()) {
+ if (!child->spilled() &&
+ child->NextSlotPosition(child->Start()) != nullptr) {
+ auto instr = code->InstructionAt(child->Start().ToInstructionIndex());
+ // Insert spill at the end to let live range connections happen at START.
+ auto move =
+ instr->GetOrCreateParallelMove(Instruction::END, code->zone());
+ InstructionOperand assigned = child->GetAssignedOperand();
+ if (TopLevel()->has_slot_use()) {
+ bool found = false;
+ for (auto move_op : *move) {
+ if (move_op->IsEliminated()) continue;
+ if (move_op->source().Equals(assigned) &&
+ move_op->destination().Equals(spill_operand)) {
+ found = true;
+ break;
+ }
+ }
+ if (found) continue;
+ }
+
+ move->AddMove(assigned, spill_operand);
+ }
+ }
+
+ return true;
+}
+
+
void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
const InstructionOperand& op,
bool might_be_duplicated) {
DCHECK_IMPLIES(op.IsConstant(), spills_at_definition_ == nullptr);
DCHECK(!IsChild());
auto zone = sequence->zone();
+
for (auto to_spill = spills_at_definition_; to_spill != nullptr;
to_spill = to_spill->next) {
auto instr = sequence->InstructionAt(to_spill->gap_index);
@@ -416,6 +498,16 @@ UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
}
+UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
+ for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
+ pos = pos->next()) {
+ if (pos->type() != UsePositionType::kRequiresSlot) continue;
+ return pos;
+ }
+ return nullptr;
+}
+
+
bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
// We cannot spill a live range that has a use requiring a register
// at the current or the immediate next position.
@@ -1087,6 +1179,57 @@ bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
}
+void RegisterAllocationData::Print(
+ const InstructionSequence* instructionSequence) {
+ OFStream os(stdout);
+ PrintableInstructionSequence wrapper;
+ wrapper.register_configuration_ = config();
+ wrapper.sequence_ = instructionSequence;
+ os << wrapper << std::endl;
+}
+
+
+void RegisterAllocationData::Print(const Instruction* instruction) {
+ OFStream os(stdout);
+ PrintableInstruction wrapper;
+ wrapper.instr_ = instruction;
+ wrapper.register_configuration_ = config();
+ os << wrapper << std::endl;
+}
+
+
+void RegisterAllocationData::Print(const LiveRange* range, bool with_children) {
+ OFStream os(stdout);
+ PrintableLiveRange wrapper;
+ wrapper.register_configuration_ = config();
+ for (const LiveRange* i = range; i != nullptr; i = i->next()) {
+ wrapper.range_ = i;
+ os << wrapper << std::endl;
+ if (!with_children) break;
+ }
+}
+
+
+void RegisterAllocationData::Print(const InstructionOperand& op) {
+ OFStream os(stdout);
+ PrintableInstructionOperand wrapper;
+ wrapper.register_configuration_ = config();
+ wrapper.op_ = op;
+ os << wrapper << std::endl;
+}
+
+
+void RegisterAllocationData::Print(const MoveOperands* move) {
+ OFStream os(stdout);
+ PrintableInstructionOperand wrapper;
+ wrapper.register_configuration_ = config();
+ wrapper.op_ = move->destination();
+ os << wrapper << " = ";
+ wrapper.op_ = move->source();
+ os << wrapper << std::endl;
+}
+
+
ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
: data_(data) {}
@@ -1102,8 +1245,11 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
machine_type = data()->MachineTypeFor(virtual_register);
}
if (operand->HasFixedSlotPolicy()) {
- allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, machine_type,
- operand->fixed_slot_index());
+ AllocatedOperand::AllocatedKind kind =
+ IsFloatingPoint(machine_type) ? AllocatedOperand::DOUBLE_STACK_SLOT
+ : AllocatedOperand::STACK_SLOT;
+ allocated =
+ AllocatedOperand(kind, machine_type, operand->fixed_slot_index());
} else if (operand->HasFixedRegisterPolicy()) {
allocated = AllocatedOperand(AllocatedOperand::REGISTER, machine_type,
operand->fixed_register_index());
@@ -1221,7 +1367,7 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
// This value is produced on the stack, we never need to spill it.
if (first_output->IsStackSlot()) {
DCHECK(StackSlotOperand::cast(first_output)->index() <
- data()->frame()->GetSpillSlotCount());
+ data()->frame()->GetTotalFrameSlotCount());
range->SetSpillOperand(StackSlotOperand::cast(first_output));
range->SetSpillStartIndex(instr_index + 1);
assigned = true;
@@ -1493,7 +1639,15 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
int out_vreg = ConstantOperand::cast(output)->virtual_register();
live->Remove(out_vreg);
}
- Define(curr_position, output);
+ if (block->IsHandler() && index == block_start) {
+ // The register defined here is blocked from gap start - it is the
+ // exception value.
+ // TODO(mtrofin): should we explore an explicit opcode for
+ // the first instruction in the handler?
+ Define(LifetimePosition::GapFromInstructionIndex(index), output);
+ } else {
+ Define(curr_position, output);
+ }
}
if (instr->ClobbersRegisters()) {
@@ -2481,9 +2635,25 @@ void OperandAssigner::CommitAssignment() {
data()->GetPhiMapValueFor(range->id())->CommitAssignment(assigned);
}
if (!range->IsChild() && !spill_operand.IsInvalid()) {
- range->CommitSpillsAtDefinition(
- data()->code(), spill_operand,
- range->has_slot_use() || range->spilled());
+ // If this top level range has a child spilled in a deferred block, we use
+ // the range and control flow connection mechanism instead of spilling at
+ // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
+ // phases. Normally, when we spill at definition, we do not insert a
+ // connecting move when a successor child range is spilled - because the
+ // spilled range picks up its value from the slot which was assigned at
+ // definition. For ranges that are determined to spill only in deferred
+ // blocks, we let ConnectLiveRanges and ResolveControlFlow insert such
+ // moves between ranges. Because of how the ranges are split around
+ // deferred blocks, this amounts to spilling and filling inside such
+ // blocks.
+ if (!range->TryCommitSpillInDeferredBlock(data()->code(),
+ spill_operand)) {
+ // Spill at definition if the range isn't spilled only in deferred
+ // blocks.
+ range->CommitSpillsAtDefinition(
+ data()->code(), spill_operand,
+ range->has_slot_use() || range->spilled());
+ }
}
}
}
@@ -2580,10 +2750,13 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
// Check if the live range is spilled and the safe point is after
// the spill position.
- if (!spill_operand.IsInvalid() &&
- safe_point >= range->spill_start_index()) {
+ int spill_index = range->IsSpilledOnlyInDeferredBlocks()
+ ? cur->Start().ToInstructionIndex()
+ : range->spill_start_index();
+
+ if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
- range->id(), range->spill_start_index(), safe_point);
+ range->id(), spill_index, safe_point);
map->RecordReference(AllocatedOperand::cast(spill_operand));
}
@@ -2780,7 +2953,8 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
const auto* pred_block = code()->InstructionBlockAt(pred);
array->Find(block, pred_block, &result);
if (result.cur_cover_ == result.pred_cover_ ||
- result.cur_cover_->spilled())
+ (!result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
+ result.cur_cover_->spilled()))
continue;
auto pred_op = result.pred_cover_->GetAssignedOperand();
auto cur_op = result.cur_cover_->GetAssignedOperand();
@@ -2819,12 +2993,13 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
DelayedInsertionMap delayed_insertion_map(local_zone);
for (auto first_range : data()->live_ranges()) {
if (first_range == nullptr || first_range->IsChild()) continue;
+ bool connect_spilled = first_range->IsSpilledOnlyInDeferredBlocks();
for (auto second_range = first_range->next(); second_range != nullptr;
first_range = second_range, second_range = second_range->next()) {
auto pos = second_range->Start();
// Add gap move if the two live ranges touch and there is no block
// boundary.
- if (second_range->spilled()) continue;
+ if (!connect_spilled && second_range->spilled()) continue;
if (first_range->End() != pos) continue;
if (data()->IsBlockBoundary(pos) &&
!CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index 83f95cbac6..2e63d36e12 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -140,6 +140,10 @@ class LifetimePosition final {
return LifetimePosition(kMaxInt);
}
+ static inline LifetimePosition FromInt(int value) {
+ return LifetimePosition(value);
+ }
+
private:
static const int kHalfStep = 2;
static const int kStep = 2 * kHalfStep;
@@ -330,6 +334,9 @@ class LiveRange final : public ZoneObject {
// range and which follows both start and last processed use position
UsePosition* NextRegisterPosition(LifetimePosition start) const;
+ // Returns the first use position requiring stack slot, or nullptr.
+ UsePosition* NextSlotPosition(LifetimePosition start) const;
+
// Returns use position for which register is beneficial in this live
// range and which follows both start and last processed use position
UsePosition* NextUsePositionRegisterIsBeneficial(
@@ -397,6 +404,16 @@ class LiveRange final : public ZoneObject {
void CommitSpillsAtDefinition(InstructionSequence* sequence,
const InstructionOperand& operand,
bool might_be_duplicated);
+ // This must be applied on top level ranges.
+ // If all the children of this range are spilled in deferred blocks, and if
+ // for any non-spilled child with a use position requiring a slot, that range
+ // is contained in a deferred block, mark the range as
+ // IsSpilledOnlyInDeferredBlocks, so that we avoid spilling at definition,
+ // and instead let the LiveRangeConnector perform the spills within the
+ // deferred blocks. If so, we insert here spills for non-spilled ranges
+ // with slot use positions.
+ bool TryCommitSpillInDeferredBlock(InstructionSequence* code,
+ const InstructionOperand& spill_operand);
void SetSpillStartIndex(int start) {
spill_start_index_ = Min(start, spill_start_index_);
@@ -433,6 +450,10 @@ class LiveRange final : public ZoneObject {
float weight() const { return weight_; }
void set_weight(float weight) { weight_ = weight; }
+ bool IsSpilledOnlyInDeferredBlocks() const {
+ return spilled_in_deferred_block_;
+ }
+
static const int kInvalidSize = -1;
static const float kInvalidWeight;
static const float kMaxWeight;
@@ -485,6 +506,10 @@ class LiveRange final : public ZoneObject {
// greedy: a metric for resolving conflicts between ranges with an assigned
// register and ranges that intersect them and need a register.
float weight_;
+
+ // TODO(mtrofin): generalize spilling after definition, currently specialized
+ // just for spill in a single deferred block.
+ bool spilled_in_deferred_block_;
DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
@@ -626,6 +651,12 @@ class RegisterAllocationData final : public ZoneObject {
PhiMapValue* GetPhiMapValueFor(int virtual_register);
bool IsBlockBoundary(LifetimePosition pos) const;
+ void Print(const InstructionSequence* instructionSequence);
+ void Print(const Instruction* instruction);
+ void Print(const LiveRange* range, bool with_children = false);
+ void Print(const InstructionOperand& op);
+ void Print(const MoveOperands* move);
+
private:
Zone* const allocation_zone_;
Frame* const frame_;
@@ -912,14 +943,24 @@ class ReferenceMapPopulator final : public ZoneObject {
};
+// Insert moves of the form
+//
+// Operand(child_(k+1)) = Operand(child_k)
+//
+// where child_k and child_(k+1) are consecutive children of a range (so
+// child_k->next() == child_(k+1)), and Operand(...) refers to the
+// assigned operand, be it a register or a slot.
class LiveRangeConnector final : public ZoneObject {
public:
explicit LiveRangeConnector(RegisterAllocationData* data);
- // Phase 8: reconnect split ranges with moves.
+ // Phase 8: reconnect split ranges with moves, when the control flow
+ // between the ranges is trivial (no branches).
void ConnectRanges(Zone* local_zone);
- // Phase 9: insert moves to connect ranges across basic blocks.
+ // Phase 9: insert moves to connect ranges across basic blocks, when the
+ // control flow between them cannot be trivially resolved, such as joining
+ // branches.
void ResolveControlFlow(Zone* local_zone);
private:
@@ -928,6 +969,7 @@ class LiveRangeConnector final : public ZoneObject {
Zone* code_zone() const { return code()->zone(); }
bool CanEagerlyResolveControlFlow(const InstructionBlock* block) const;
+
void ResolveControlFlow(const InstructionBlock* block,
const InstructionOperand& cur_op,
const InstructionBlock* pred,
diff --git a/deps/v8/src/compiler/register-configuration.cc b/deps/v8/src/compiler/register-configuration.cc
index a3d3be1790..ebe6cfe23c 100644
--- a/deps/v8/src/compiler/register-configuration.cc
+++ b/deps/v8/src/compiler/register-configuration.cc
@@ -25,7 +25,7 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
1,
1,
#else
- DoubleRegister::kMaxNumAllocatableRegisters,
+ DoubleRegister::NumAllocatableRegisters(),
DoubleRegister::NumAllocatableAliasedRegisters(),
#endif
general_register_name_table_,
@@ -35,7 +35,9 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
general_register_name_table_[i] = Register::AllocationIndexToString(i);
}
- for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
+ DCHECK_GE(DoubleRegister::kMaxNumAllocatableRegisters,
+ DoubleRegister::NumAllocatableRegisters());
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_name_table_[i] =
DoubleRegister::AllocationIndexToString(i);
}
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 006b6ab28f..eafd3b6a85 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -414,6 +414,29 @@ class RepresentationSelector {
}
}
+ void VisitCall(Node* node, SimplifiedLowering* lowering) {
+ const CallDescriptor* desc = OpParameter<const CallDescriptor*>(node->op());
+ const MachineSignature* sig = desc->GetMachineSignature();
+ int params = static_cast<int>(sig->parameter_count());
+ // Propagate representation information from call descriptor.
+ for (int i = 0; i < node->InputCount(); i++) {
+ if (i == 0) {
+ // The target of the call.
+ ProcessInput(node, i, 0);
+ } else if ((i - 1) < params) {
+ ProcessInput(node, i, sig->GetParam(i - 1));
+ } else {
+ ProcessInput(node, i, 0);
+ }
+ }
+
+ if (sig->return_count() > 0) {
+ SetOutput(node, desc->GetMachineSignature()->GetReturn());
+ } else {
+ SetOutput(node, kMachAnyTagged);
+ }
+ }
+
void VisitStateValues(Node* node) {
if (phase_ == PROPAGATE) {
for (int i = 0; i < node->InputCount(); i++) {
@@ -533,6 +556,8 @@ class RepresentationSelector {
return VisitSelect(node, use, lowering);
case IrOpcode::kPhi:
return VisitPhi(node, use, lowering);
+ case IrOpcode::kCall:
+ return VisitCall(node, lowering);
//------------------------------------------------------------------
// JavaScript operators.
@@ -1341,7 +1366,7 @@ void SimplifiedLowering::DoStoreElement(Node* node) {
Node* SimplifiedLowering::StringComparison(Node* node, bool requires_ordering) {
Runtime::FunctionId f =
- requires_ordering ? Runtime::kStringCompareRT : Runtime::kStringEquals;
+ requires_ordering ? Runtime::kStringCompare : Runtime::kStringEquals;
ExternalReference ref(f, jsgraph()->isolate());
Operator::Properties props = node->op()->properties();
// TODO(mstarzinger): We should call StringCompareStub here instead, once an
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 1460cb04f3..447bf9e5e9 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -9,6 +9,7 @@
#include "src/compiler/machine-type.h"
#include "src/handles.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/source-position.cc b/deps/v8/src/compiler/source-position.cc
index 48361ecac7..aba77b36f3 100644
--- a/deps/v8/src/compiler/source-position.cc
+++ b/deps/v8/src/compiler/source-position.cc
@@ -5,6 +5,7 @@
#include "src/compiler/source-position.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-aux-data.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 3f4bd27ec6..aabcf4b5a8 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -7,6 +7,7 @@
#include "src/base/flags.h"
#include "src/base/lazy-instance.h"
#include "src/bootstrapper.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node.h"
@@ -571,11 +572,11 @@ Bounds Typer::Visitor::TypeIfException(Node* node) {
Bounds Typer::Visitor::TypeParameter(Node* node) {
- int param = OpParameter<int>(node);
- Type::FunctionType* function_type = typer_->function_type();
- if (function_type != nullptr && param >= 0 &&
- param < static_cast<int>(function_type->Arity())) {
- return Bounds(Type::None(), function_type->Parameter(param));
+ if (Type::FunctionType* function_type = typer_->function_type()) {
+ int const index = ParameterIndexOf(node->op());
+ if (index >= 0 && index < function_type->Arity()) {
+ return Bounds(Type::None(), function_type->Parameter(index));
+ }
}
return Bounds::Unbounded(zone());
}
@@ -1558,6 +1559,8 @@ Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
return Bounds(Type::None(), Type::Range(0, 32, zone()));
case Runtime::kInlineStringGetLength:
return Bounds(Type::None(), Type::Range(0, String::kMaxLength, zone()));
+ case Runtime::kInlineToObject:
+ return Bounds(Type::None(), Type::Receiver());
default:
break;
}
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index bdce083201..3ad07ad7de 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -7,6 +7,7 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -48,10 +49,10 @@ class X64OperandConverter : public InstructionOperandConverter {
Operand ToOperand(InstructionOperand* op, int extra = 0) {
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(
- AllocatedOperand::cast(op)->index(), frame(), extra);
- return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
+ FrameOffset offset =
+ linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ return Operand(offset.from_stack_pointer() ? rsp : rbp,
+ offset.offset() + extra);
}
static size_t NextOffset(size_t* offset) {
@@ -567,8 +568,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
- int entry = Code::kHeaderSize - kHeapObjectTag;
- __ jmp(Operand(reg, entry));
+ __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(reg);
}
break;
}
@@ -1219,6 +1220,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
if (instr->InputAt(0)->IsRegister()) {
__ pushq(i.InputRegister(0));
+ } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ // TODO(titzer): use another machine instruction?
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
} else {
__ pushq(i.InputOperand(0));
}
@@ -1456,50 +1461,28 @@ void CodeGenerator::AssembleDeoptimizerCall(
}
+namespace {
+
+static const int kQuadWordSize = 16;
+
+} // namespace
+
+
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
__ pushq(rbp);
__ movq(rbp, rsp);
- int register_save_area_size = 0;
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- __ pushq(Register::from_code(i));
- register_save_area_size += kPointerSize;
- }
- }
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) { // Save callee-saved XMM registers.
- const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
- const int stack_size = saves_fp_count * 16;
- // Adjust the stack pointer.
- __ subp(rsp, Immediate(stack_size));
- // Store the registers on the stack.
- int slot_idx = 0;
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
- if (!((1 << i) & saves_fp)) continue;
- __ movdqu(Operand(rsp, 16 * slot_idx), XMMRegister::from_code(i));
- slot_idx++;
- }
- register_save_area_size += stack_size;
- }
- if (register_save_area_size > 0) {
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
- }
} else if (descriptor->IsJSFunctionCall()) {
CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
} else if (needs_frame_) {
__ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
}
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1512,77 +1495,91 @@ void CodeGenerator::AssemblePrologue() {
osr_pc_offset_ = __ pc_offset();
// TODO(titzer): cannot address target function == local #-1
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
- stack_slots -= frame()->GetOsrStackSlotCount();
+ stack_shrink_slots -=
+ static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
+ }
+
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+ }
+ if (stack_shrink_slots > 0) {
+ __ subq(rsp, Immediate(stack_shrink_slots * kPointerSize));
+ }
+
+ if (saves_fp != 0) { // Save callee-saved XMM registers.
+ const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
+ const int stack_size = saves_fp_count * kQuadWordSize;
+ // Adjust the stack pointer.
+ __ subp(rsp, Immediate(stack_size));
+ // Store the registers on the stack.
+ int slot_idx = 0;
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ if (!((1 << i) & saves_fp)) continue;
+ __ movdqu(Operand(rsp, kQuadWordSize * slot_idx),
+ XMMRegister::from_code(i));
+ slot_idx++;
+ }
+ frame()->AllocateSavedCalleeRegisterSlots(saves_fp_count *
+ (kQuadWordSize / kPointerSize));
}
- if (stack_slots > 0) {
- __ subq(rsp, Immediate(stack_slots * kPointerSize));
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ __ pushq(Register::from_code(i));
+ frame()->AllocateSavedCalleeRegisterSlots(1);
+ }
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- if (stack_slots > 0) {
- __ addq(rsp, Immediate(stack_slots * kPointerSize));
- }
- // Restore registers.
- const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) {
- const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
- const int stack_size = saves_fp_count * 16;
- // Load the registers from the stack.
- int slot_idx = 0;
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
- if (!((1 << i) & saves_fp)) continue;
- __ movdqu(XMMRegister::from_code(i), Operand(rsp, 16 * slot_idx));
- slot_idx++;
- }
- // Adjust the stack pointer.
- __ addp(rsp, Immediate(stack_size));
- }
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) {
- for (int i = 0; i < Register::kNumRegisters; i++) {
- if (!((1 << i) & saves)) continue;
- __ popq(Register::from_code(i));
- }
- }
- __ popq(rbp); // Pop caller's frame pointer.
- __ ret(0);
- } else {
- // No saved registers.
- __ movq(rsp, rbp); // Move stack pointer back to frame pointer.
- __ popq(rbp); // Pop caller's frame pointer.
- __ ret(0);
+
+ // Restore registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ if (!((1 << i) & saves)) continue;
+ __ popq(Register::from_code(i));
}
+ }
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
+ const int stack_size = saves_fp_count * kQuadWordSize;
+ // Load the registers from the stack.
+ int slot_idx = 0;
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ if (!((1 << i) & saves_fp)) continue;
+ __ movdqu(XMMRegister::from_code(i),
+ Operand(rsp, kQuadWordSize * slot_idx));
+ slot_idx++;
+ }
+ // Adjust the stack pointer.
+ __ addp(rsp, Immediate(stack_size));
+ }
+
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ __ movq(rsp, rbp); // Move stack pointer back to frame pointer.
+ __ popq(rbp); // Pop caller's frame pointer.
} else if (descriptor->IsJSFunctionCall() || needs_frame_) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ jmp(&return_label_);
+ return;
} else {
__ bind(&return_label_);
__ movq(rsp, rbp); // Move stack pointer back to frame pointer.
__ popq(rbp); // Pop caller's frame pointer.
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : (info()->IsStub()
- ? info()->code_stub()->GetStackParameterCount()
- : 0);
- if (pop_count == 0) {
- __ Ret();
- } else {
- __ Ret(pop_count * kPointerSize, rbx);
- }
}
- } else {
- __ Ret();
}
+ size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ // Might need rcx for scratch if pop_size is too big.
+ DCHECK_EQ(0, descriptor->CalleeSavedRegisters() & rcx.bit());
+ __ Ret(static_cast<int>(pop_size), rcx);
}
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 6d7fca472e..0bcd526322 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -1053,21 +1053,26 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* node = buffer.pushed_nodes[n]) {
- int const slot = static_cast<int>(n);
- InstructionOperand value =
- g.CanBeImmediate(node) ? g.UseImmediate(node) : g.UseRegister(node);
+ if (Node* input = buffer.pushed_nodes[n]) {
+ int slot = static_cast<int>(n);
+ InstructionOperand value = g.CanBeImmediate(input)
+ ? g.UseImmediate(input)
+ : g.UseRegister(input);
Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
} else {
// Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- // TODO(titzer): handle pushing double parameters.
+ for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ // TODO(titzer): X64Push cannot handle stack->stack double moves
+ // because there is no way to encode fixed double slots.
InstructionOperand value =
- g.CanBeImmediate(node)
- ? g.UseImmediate(node)
- : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ g.CanBeImmediate(input)
+ ? g.UseImmediate(input)
+ : IsSupported(ATOM) ||
+ sequence()->IsFloat(GetVirtualRegister(input))
+ ? g.UseRegister(input)
+ : g.Use(input);
Emit(kX64Push, g.NoOutput(), value);
}
}
@@ -1156,12 +1161,12 @@ void InstructionSelector::VisitTailCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, true);
// Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ for (Node* input : base::Reversed(buffer.pushed_nodes)) {
// TODO(titzer): Handle pushing double parameters.
InstructionOperand value =
- g.CanBeImmediate(node)
- ? g.UseImmediate(node)
- : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ g.CanBeImmediate(input)
+ ? g.UseImmediate(input)
+ : IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
Emit(kX64Push, g.NoOutput(), value);
}
diff --git a/deps/v8/src/compiler/x64/linkage-x64.cc b/deps/v8/src/compiler/x64/linkage-x64.cc
deleted file mode 100644
index b272eb6f76..0000000000
--- a/deps/v8/src/compiler/x64/linkage-x64.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#ifdef _WIN64
-const bool kWin64 = true;
-#else
-const bool kWin64 = false;
-#endif
-
-struct X64LinkageHelperTraits {
- static Register ReturnValueReg() { return rax; }
- static Register ReturnValue2Reg() { return rdx; }
- static Register JSCallFunctionReg() { return rdi; }
- static Register ContextReg() { return rsi; }
- static Register RuntimeCallFunctionReg() { return rbx; }
- static Register RuntimeCallArgCountReg() { return rax; }
- static RegList CCalleeSaveRegisters() {
- if (kWin64) {
- return rbx.bit() | rdi.bit() | rsi.bit() | r12.bit() | r13.bit() |
- r14.bit() | r15.bit();
- } else {
- return rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit();
- }
- }
- static RegList CCalleeSaveFPRegisters() {
- if (kWin64) {
- return (1 << xmm6.code()) | (1 << xmm7.code()) | (1 << xmm8.code()) |
- (1 << xmm9.code()) | (1 << xmm10.code()) | (1 << xmm11.code()) |
- (1 << xmm12.code()) | (1 << xmm13.code()) | (1 << xmm14.code()) |
- (1 << xmm15.code());
- } else {
- return 0;
- }
- }
- static Register CRegisterParameter(int i) {
- if (kWin64) {
- static Register register_parameters[] = {rcx, rdx, r8, r9};
- return register_parameters[i];
- } else {
- static Register register_parameters[] = {rdi, rsi, rdx, rcx, r8, r9};
- return register_parameters[i];
- }
- }
- static int CRegisterParametersLength() { return kWin64 ? 4 : 6; }
- static int CStackBackingStoreLength() { return kWin64 ? 4 : 0; }
-};
-
-typedef LinkageHelper<X64LinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
- int parameter_count,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Zone* zone, Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties, MachineType return_type) {
- return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties,
- return_type);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- const MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index 1335d3f568..d39fda6761 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -7,8 +7,10 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
#include "src/scopes.h"
#include "src/x87/assembler-x87.h"
+#include "src/x87/frames-x87.h"
#include "src/x87/macro-assembler-x87.h"
namespace v8 {
@@ -38,15 +40,12 @@ class X87OperandConverter : public InstructionOperandConverter {
if (op->IsRegister()) {
DCHECK(extra == 0);
return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
- DCHECK(extra == 0);
- UNIMPLEMENTED();
}
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(
- AllocatedOperand::cast(op)->index(), frame(), extra);
- return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+ FrameOffset offset =
+ linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ return Operand(offset.from_stack_pointer() ? esp : ebp,
+ offset.offset() + extra);
}
Operand HighOperand(InstructionOperand* op) {
@@ -338,7 +337,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
- __ jmp(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
+ __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(reg);
}
break;
}
@@ -1105,7 +1105,29 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Push:
- if (HasImmediateInput(instr, 0)) {
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
+ if (allocated.machine_type() == kRepFloat32) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_s(Operand(esp, 0));
+ } else {
+ DCHECK(allocated.machine_type() == kRepFloat64);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(Operand(esp, 0));
+ }
+ } else if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
+ if (allocated.machine_type() == kRepFloat32) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fld_s(i.InputOperand(0));
+ __ fstp_s(MemOperand(esp, 0));
+ } else {
+ DCHECK(allocated.machine_type() == kRepFloat64);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fld_d(i.InputOperand(0));
+ __ fstp_d(MemOperand(esp, 0));
+ }
+ } else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
} else {
__ push(i.InputOperand(0));
@@ -1509,34 +1531,22 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
// Assemble a prologue similar the to cdecl calling convention.
__ push(ebp);
__ mov(ebp, esp);
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- int register_save_area_size = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- __ push(Register::from_code(i));
- register_save_area_size += kPointerSize;
- }
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
- }
} else if (descriptor->IsJSFunctionCall()) {
// TODO(turbofan): this prologue is redundant with OSR, but needed for
// code aging.
CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
} else if (needs_frame_) {
__ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
}
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1549,13 +1559,23 @@ void CodeGenerator::AssemblePrologue() {
osr_pc_offset_ = __ pc_offset();
// TODO(titzer): cannot address target function == local #-1
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
- stack_slots -= frame()->GetOsrStackSlotCount();
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (stack_shrink_slots > 0) {
+ __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
}
- if (stack_slots > 0) {
- // Allocate the stack slots used by this frame.
- __ sub(esp, Immediate(stack_slots * kPointerSize));
+ if (saves != 0) { // Save callee-saved registers.
+ DCHECK(!info()->is_osr());
+ int pushed = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ __ push(Register::from_code(i));
+ ++pushed;
+ }
+ frame()->AllocateSavedCalleeRegisterSlots(pushed);
}
// Initailize FPU state.
@@ -1566,50 +1586,35 @@ void CodeGenerator::AssemblePrologue() {
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- if (stack_slots > 0) {
- __ add(esp, Immediate(stack_slots * kPointerSize));
- }
- // Restore registers.
- if (saves != 0) {
- for (int i = 0; i < Register::kNumRegisters; i++) {
- if (!((1 << i) & saves)) continue;
- __ pop(Register::from_code(i));
- }
- }
- __ pop(ebp); // Pop caller's frame pointer.
- __ ret(0);
- } else {
- // No saved registers.
- __ mov(esp, ebp); // Move stack pointer back to frame pointer.
- __ pop(ebp); // Pop caller's frame pointer.
- __ ret(0);
+
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ // Restore registers.
+ if (saves != 0) {
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ if (!((1 << i) & saves)) continue;
+ __ pop(Register::from_code(i));
}
+ }
+
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer.
+ __ pop(ebp); // Pop caller's frame pointer.
} else if (descriptor->IsJSFunctionCall() || needs_frame_) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ jmp(&return_label_);
+ return;
} else {
__ bind(&return_label_);
__ mov(esp, ebp); // Move stack pointer back to frame pointer.
__ pop(ebp); // Pop caller's frame pointer.
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : (info()->IsStub()
- ? info()->code_stub()->GetStackParameterCount()
- : 0);
- if (pop_count == 0) {
- __ ret(0);
- } else {
- __ Ret(pop_count * kPointerSize, ebx);
- }
}
- } else {
+ }
+ if (pop_count == 0) {
__ ret(0);
+ } else {
+ __ Ret(pop_count * kPointerSize, ebx);
}
}
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index d350738e0b..95aa70ac92 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -841,21 +841,26 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* node = buffer.pushed_nodes[n]) {
+ if (Node* input = buffer.pushed_nodes[n]) {
int const slot = static_cast<int>(n);
- InstructionOperand value =
- g.CanBeImmediate(node) ? g.UseImmediate(node) : g.UseRegister(node);
+ InstructionOperand value = g.CanBeImmediate(input)
+ ? g.UseImmediate(input)
+ : g.UseRegister(input);
Emit(kX87Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
} else {
// Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ for (Node* input : base::Reversed(buffer.pushed_nodes)) {
// TODO(titzer): handle pushing double parameters.
+ if (input == nullptr) continue;
InstructionOperand value =
- g.CanBeImmediate(node)
- ? g.UseImmediate(node)
- : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ g.CanBeImmediate(input)
+ ? g.UseImmediate(input)
+ : IsSupported(ATOM) ||
+ sequence()->IsFloat(GetVirtualRegister(input))
+ ? g.UseRegister(input)
+ : g.Use(input);
Emit(kX87Push, g.NoOutput(), value);
}
}
@@ -945,12 +950,12 @@ void InstructionSelector::VisitTailCall(Node* node) {
InitializeCallBuffer(node, &buffer, true, true);
// Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ for (Node* input : base::Reversed(buffer.pushed_nodes)) {
// TODO(titzer): Handle pushing double parameters.
InstructionOperand value =
- g.CanBeImmediate(node)
- ? g.UseImmediate(node)
- : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ g.CanBeImmediate(input)
+ ? g.UseImmediate(input)
+ : IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
Emit(kX87Push, g.NoOutput(), value);
}
diff --git a/deps/v8/src/compiler/x87/linkage-x87.cc b/deps/v8/src/compiler/x87/linkage-x87.cc
deleted file mode 100644
index 69e1b3de59..0000000000
--- a/deps/v8/src/compiler/x87/linkage-x87.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct X87LinkageHelperTraits {
- static Register ReturnValueReg() { return eax; }
- static Register ReturnValue2Reg() { return edx; }
- static Register JSCallFunctionReg() { return edi; }
- static Register ContextReg() { return esi; }
- static Register RuntimeCallFunctionReg() { return ebx; }
- static Register RuntimeCallArgCountReg() { return eax; }
- static RegList CCalleeSaveRegisters() {
- return esi.bit() | edi.bit() | ebx.bit();
- }
- static RegList CCalleeSaveFPRegisters() { return 0; }
- static Register CRegisterParameter(int i) { return no_reg; }
- static int CRegisterParametersLength() { return 0; }
- static int CStackBackingStoreLength() { return 0; }
-};
-
-typedef LinkageHelper<X87LinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
- int parameter_count,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Zone* zone, Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties, MachineType return_type) {
- return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties,
- return_type);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- const MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/context-measure.cc b/deps/v8/src/context-measure.cc
new file mode 100644
index 0000000000..42a08be0af
--- /dev/null
+++ b/deps/v8/src/context-measure.cc
@@ -0,0 +1,76 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/context-measure.h"
+
+namespace v8 {
+namespace internal {
+
+ContextMeasure::ContextMeasure(Context* context)
+ : context_(context),
+ root_index_map_(context->GetIsolate()),
+ recursion_depth_(0),
+ count_(0),
+ size_(0) {
+ DCHECK(context_->IsNativeContext());
+ Object* next_link = context_->get(Context::NEXT_CONTEXT_LINK);
+ MeasureObject(context_);
+ MeasureDeferredObjects();
+ context_->set(Context::NEXT_CONTEXT_LINK, next_link);
+}
+
+
+bool ContextMeasure::IsShared(HeapObject* object) {
+ if (object->IsScript()) return true;
+ if (object->IsSharedFunctionInfo()) return true;
+ if (object->IsScopeInfo()) return true;
+ if (object->IsCode() && !Code::cast(object)->is_optimized_code()) return true;
+ if (object->IsExecutableAccessorInfo()) return true;
+ if (object->IsWeakCell()) return true;
+ return false;
+}
+
+
+void ContextMeasure::MeasureObject(HeapObject* object) {
+ if (back_reference_map_.Lookup(object).is_valid()) return;
+ if (root_index_map_.Lookup(object) != RootIndexMap::kInvalidRootIndex) return;
+ if (IsShared(object)) return;
+ back_reference_map_.Add(object, BackReference::DummyReference());
+ recursion_depth_++;
+ if (recursion_depth_ > kMaxRecursion) {
+ deferred_objects_.Add(object);
+ } else {
+ MeasureAndRecurse(object);
+ }
+ recursion_depth_--;
+}
+
+
+void ContextMeasure::MeasureDeferredObjects() {
+ while (deferred_objects_.length() > 0) {
+ MeasureAndRecurse(deferred_objects_.RemoveLast());
+ }
+}
+
+
+void ContextMeasure::MeasureAndRecurse(HeapObject* object) {
+ int size = object->Size();
+ count_++;
+ size_ += size;
+ Map* map = object->map();
+ MeasureObject(map);
+ object->IterateBody(map->instance_type(), size, this);
+}
+
+
+void ContextMeasure::VisitPointers(Object** start, Object** end) {
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsSmi()) continue;
+ MeasureObject(HeapObject::cast(*current));
+ }
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/context-measure.h b/deps/v8/src/context-measure.h
new file mode 100644
index 0000000000..f01c37418f
--- /dev/null
+++ b/deps/v8/src/context-measure.h
@@ -0,0 +1,47 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CONTEXT_MEASURE_H_
+#define V8_CONTEXT_MEASURE_H_
+
+#include "src/snapshot/serialize.h"
+
+namespace v8 {
+namespace internal {
+
+class ContextMeasure : public ObjectVisitor {
+ public:
+ explicit ContextMeasure(Context* context);
+
+ int Size() { return size_; }
+ int Count() { return count_; }
+
+ void VisitPointers(Object** start, Object** end);
+
+ private:
+ void MeasureObject(HeapObject* object);
+ void MeasureDeferredObjects();
+ void MeasureAndRecurse(HeapObject* object);
+ bool IsShared(HeapObject* object);
+
+ Context* context_;
+
+ BackReferenceMap back_reference_map_;
+ RootIndexMap root_index_map_;
+
+ static const int kMaxRecursion = 16;
+ int recursion_depth_;
+ List<HeapObject*> deferred_objects_;
+
+ int count_;
+ int size_;
+
+ DisallowHeapAllocation no_gc_;
+
+ DISALLOW_COPY_AND_ASSIGN(ContextMeasure);
+};
+}
+} // namespace v8::internal
+
+#endif // V8_CONTEXT_MEASURE_H_
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index b29405b505..ef850452ce 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/contexts.h"
#include "src/bootstrapper.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/scopeinfo.h"
namespace v8 {
@@ -20,8 +20,11 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
CHECK(used >= 0 && length > 0 && used < length);
if (used + 1 == length) {
CHECK(length < Smi::kMaxValue / 2);
- result = Handle<ScriptContextTable>::cast(
- FixedArray::CopySize(table, length * 2));
+ Isolate* isolate = table->GetIsolate();
+ Handle<FixedArray> copy =
+ isolate->factory()->CopyFixedArrayAndGrow(table, length);
+ copy->set_map(isolate->heap()->script_context_table_map());
+ result = Handle<ScriptContextTable>::cast(copy);
} else {
result = table;
}
@@ -140,7 +143,6 @@ static void GetAttributesAndBindingFlags(VariableMode mode,
PropertyAttributes* attributes,
BindingFlags* binding_flags) {
switch (mode) {
- case INTERNAL: // Fall through.
case VAR:
*attributes = NONE;
*binding_flags = MUTABLE_IS_INITIALIZED;
@@ -366,8 +368,6 @@ void Context::InitializeGlobalSlots() {
int context_locals = scope_info->ContextLocalCount();
int index = Context::MIN_CONTEXT_SLOTS + context_locals;
for (int i = 0; i < context_globals; i++) {
- // Clear both read and write slots.
- set(index++, empty_cell);
set(index++, empty_cell);
}
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 1210848384..9e6fc0e4f5 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -81,25 +81,30 @@ enum BindingFlags {
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
+ V(FLOAT32X4_FUNCTION_INDEX, JSFunction, float32x4_function) \
+ V(INT32X4_FUNCTION_INDEX, JSFunction, int32x4_function) \
+ V(BOOL32X4_FUNCTION_INDEX, JSFunction, bool32x4_function) \
+ V(INT16X8_FUNCTION_INDEX, JSFunction, int16x8_function) \
+ V(BOOL16X8_FUNCTION_INDEX, JSFunction, bool16x8_function) \
+ V(INT8X16_FUNCTION_INDEX, JSFunction, int8x16_function) \
+ V(BOOL8X16_FUNCTION_INDEX, JSFunction, bool8x16_function) \
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(JS_OBJECT_STRONG_MAP_INDEX, Map, js_object_strong_map) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
V(JS_ARRAY_STRONG_MAPS_INDEX, Object, js_array_strong_maps) \
- V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
- V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
+ V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun) \
V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun) \
V(TO_DETAIL_STRING_FUN_INDEX, JSFunction, to_detail_string_fun) \
- V(TO_OBJECT_FUN_INDEX, JSFunction, to_object_fun) \
+ V(NO_SIDE_EFFECT_TO_STRING_FUN_INDEX, JSFunction, \
+ no_side_effect_to_string_fun) \
V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun) \
- V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \
- V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \
V(TO_LENGTH_FUN_INDEX, JSFunction, to_length_fun) \
V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
@@ -114,16 +119,6 @@ enum BindingFlags {
V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
- V(INT8_ARRAY_EXTERNAL_MAP_INDEX, Map, int8_array_external_map) \
- V(UINT8_ARRAY_EXTERNAL_MAP_INDEX, Map, uint8_array_external_map) \
- V(INT16_ARRAY_EXTERNAL_MAP_INDEX, Map, int16_array_external_map) \
- V(UINT16_ARRAY_EXTERNAL_MAP_INDEX, Map, uint16_array_external_map) \
- V(INT32_ARRAY_EXTERNAL_MAP_INDEX, Map, int32_array_external_map) \
- V(UINT32_ARRAY_EXTERNAL_MAP_INDEX, Map, uint32_array_external_map) \
- V(FLOAT32_ARRAY_EXTERNAL_MAP_INDEX, Map, float32_array_external_map) \
- V(FLOAT64_ARRAY_EXTERNAL_MAP_INDEX, Map, float64_array_external_map) \
- V(UINT8_CLAMPED_ARRAY_EXTERNAL_MAP_INDEX, Map, \
- uint8_clamped_array_external_map) \
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \
V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
@@ -142,11 +137,8 @@ enum BindingFlags {
V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map) \
V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \
V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
- V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
- V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \
V(FUNCTION_CACHE_INDEX, ObjectHashTable, function_cache) \
- V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \
V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache) \
V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
@@ -157,10 +149,17 @@ enum BindingFlags {
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(MAP_CACHE_INDEX, Object, map_cache) \
V(STRONG_MAP_CACHE_INDEX, Object, strong_map_cache) \
- V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
+ V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
+ V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
+ V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
+ V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
+ V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
+ V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
+ V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
+ V(MAKE_ERROR_FUNCTION_INDEX, JSFunction, make_error_function) \
V(PROMISE_STATUS_INDEX, Symbol, promise_status) \
V(PROMISE_VALUE_INDEX, Symbol, promise_value) \
V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
@@ -169,8 +168,18 @@ enum BindingFlags {
V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain) \
V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
+ V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction, \
+ promise_has_user_defined_reject_handler) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
+ V(OBJECT_DEFINE_OWN_PROPERTY_INDEX, JSFunction, object_define_own_property) \
+ V(OBJECT_GET_OWN_PROPERTY_DESCROPTOR_INDEX, JSFunction, \
+ object_get_own_property_descriptor) \
+ V(MESSAGE_GET_LINE_NUMBER_INDEX, JSFunction, message_get_line_number) \
+ V(MESSAGE_GET_COLUMN_NUMBER_INDEX, JSFunction, message_get_column_number) \
+ V(MESSAGE_GET_SOURCE_LINE_INDEX, JSFunction, message_get_source_line) \
+ V(STACK_OVERFLOW_BOILERPLATE_INDEX, JSObject, stack_overflow_boilerplate) \
+ V(JSON_SERIALIZE_ADAPTER_INDEX, JSFunction, json_serialize_adapter) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
@@ -206,7 +215,7 @@ enum BindingFlags {
V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
V(NATIVES_UTILS_OBJECT_INDEX, Object, natives_utils_object) \
- V(EXTRAS_EXPORTS_OBJECT_INDEX, JSObject, extras_exports_object)
+ V(EXTRAS_EXPORTS_OBJECT_INDEX, JSObject, extras_binding_object)
// A table of all script contexts. Every loaded top-level script with top-level
@@ -330,141 +339,11 @@ class Context: public FixedArray {
// scope info (block contexts), or the module instance (module contexts).
EXTENSION_INDEX,
GLOBAL_OBJECT_INDEX,
- MIN_CONTEXT_SLOTS,
-
- // This slot holds the thrown value in catch contexts.
- THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
// These slots are only in native contexts.
- GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
- SECURITY_TOKEN_INDEX,
- SLOPPY_ARGUMENTS_MAP_INDEX,
- FAST_ALIASED_ARGUMENTS_MAP_INDEX,
- SLOW_ALIASED_ARGUMENTS_MAP_INDEX,
- STRICT_ARGUMENTS_MAP_INDEX,
- REGEXP_RESULT_MAP_INDEX,
- SLOPPY_FUNCTION_MAP_INDEX,
- SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX,
- STRICT_FUNCTION_MAP_INDEX,
- STRONG_FUNCTION_MAP_INDEX,
- SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
- STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
- STRONG_CONSTRUCTOR_MAP_INDEX,
- BOUND_FUNCTION_MAP_INDEX,
- INITIAL_OBJECT_PROTOTYPE_INDEX,
- INITIAL_ARRAY_PROTOTYPE_INDEX,
- BOOLEAN_FUNCTION_INDEX,
- NUMBER_FUNCTION_INDEX,
- STRING_FUNCTION_INDEX,
- STRING_FUNCTION_PROTOTYPE_MAP_INDEX,
- SYMBOL_FUNCTION_INDEX,
- OBJECT_FUNCTION_INDEX,
- JS_OBJECT_STRONG_MAP_INDEX,
- INTERNAL_ARRAY_FUNCTION_INDEX,
- ARRAY_FUNCTION_INDEX,
- JS_ARRAY_MAPS_INDEX,
- JS_ARRAY_STRONG_MAPS_INDEX,
- DATE_FUNCTION_INDEX,
- JSON_OBJECT_INDEX,
- REGEXP_FUNCTION_INDEX,
- CREATE_DATE_FUN_INDEX,
- TO_NUMBER_FUN_INDEX,
- TO_STRING_FUN_INDEX,
- TO_DETAIL_STRING_FUN_INDEX,
- TO_OBJECT_FUN_INDEX,
- TO_INTEGER_FUN_INDEX,
- TO_UINT32_FUN_INDEX,
- TO_INT32_FUN_INDEX,
- TO_BOOLEAN_FUN_INDEX,
- GLOBAL_EVAL_FUN_INDEX,
- ARRAY_BUFFER_FUN_INDEX,
- ARRAY_BUFFER_MAP_INDEX,
- UINT8_ARRAY_FUN_INDEX,
- INT8_ARRAY_FUN_INDEX,
- UINT16_ARRAY_FUN_INDEX,
- INT16_ARRAY_FUN_INDEX,
- UINT32_ARRAY_FUN_INDEX,
- INT32_ARRAY_FUN_INDEX,
- FLOAT32_ARRAY_FUN_INDEX,
- FLOAT64_ARRAY_FUN_INDEX,
- UINT8_CLAMPED_ARRAY_FUN_INDEX,
- INT8_ARRAY_EXTERNAL_MAP_INDEX,
- UINT8_ARRAY_EXTERNAL_MAP_INDEX,
- INT16_ARRAY_EXTERNAL_MAP_INDEX,
- UINT16_ARRAY_EXTERNAL_MAP_INDEX,
- INT32_ARRAY_EXTERNAL_MAP_INDEX,
- UINT32_ARRAY_EXTERNAL_MAP_INDEX,
- FLOAT32_ARRAY_EXTERNAL_MAP_INDEX,
- FLOAT64_ARRAY_EXTERNAL_MAP_INDEX,
- UINT8_CLAMPED_ARRAY_EXTERNAL_MAP_INDEX,
- DATA_VIEW_FUN_INDEX,
- SHARED_ARRAY_BUFFER_FUN_INDEX,
- MESSAGE_LISTENERS_INDEX,
- MAKE_MESSAGE_FUN_INDEX,
- GET_STACK_TRACE_LINE_INDEX,
- CONFIGURE_GLOBAL_INDEX,
- FUNCTION_CACHE_INDEX,
- JSFUNCTION_RESULT_CACHES_INDEX,
- NORMALIZED_MAP_CACHE_INDEX,
- RUNTIME_CONTEXT_INDEX,
- CALL_AS_FUNCTION_DELEGATE_INDEX,
- CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
- SCRIPT_FUNCTION_INDEX,
- OPAQUE_REFERENCE_FUNCTION_INDEX,
- CONTEXT_EXTENSION_FUNCTION_INDEX,
- OUT_OF_MEMORY_INDEX,
- EMBEDDER_DATA_INDEX,
- ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
- ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
- RUN_MICROTASKS_INDEX,
- ENQUEUE_MICROTASK_INDEX,
- PROMISE_STATUS_INDEX,
- PROMISE_VALUE_INDEX,
- PROMISE_CREATE_INDEX,
- PROMISE_RESOLVE_INDEX,
- PROMISE_REJECT_INDEX,
- PROMISE_CHAIN_INDEX,
- PROMISE_CATCH_INDEX,
- PROMISE_THEN_INDEX,
- TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
- DERIVED_HAS_TRAP_INDEX,
- DERIVED_GET_TRAP_INDEX,
- DERIVED_SET_TRAP_INDEX,
- PROXY_ENUMERATE_INDEX,
- OBSERVERS_NOTIFY_CHANGE_INDEX,
- OBSERVERS_ENQUEUE_SPLICE_INDEX,
- OBSERVERS_BEGIN_SPLICE_INDEX,
- OBSERVERS_END_SPLICE_INDEX,
- NATIVE_OBJECT_OBSERVE_INDEX,
- NATIVE_OBJECT_GET_NOTIFIER_INDEX,
- NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE,
- SLOPPY_GENERATOR_FUNCTION_MAP_INDEX,
- STRICT_GENERATOR_FUNCTION_MAP_INDEX,
- STRONG_GENERATOR_FUNCTION_MAP_INDEX,
- GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
- ITERATOR_RESULT_MAP_INDEX,
- JS_MAP_FUN_INDEX,
- JS_MAP_MAP_INDEX,
- JS_SET_FUN_INDEX,
- JS_SET_MAP_INDEX,
- MAP_GET_METHOD_INDEX,
- MAP_SET_METHOD_INDEX,
- MAP_HAS_METHOD_INDEX,
- MAP_DELETE_METHOD_INDEX,
- SET_ADD_METHOD_INDEX,
- SET_HAS_METHOD_INDEX,
- SET_DELETE_METHOD_INDEX,
- MAP_FROM_ARRAY_INDEX,
- SET_FROM_ARRAY_INDEX,
- MAP_ITERATOR_MAP_INDEX,
- SET_ITERATOR_MAP_INDEX,
- ARRAY_VALUES_ITERATOR_INDEX,
- SCRIPT_CONTEXT_TABLE_INDEX,
- MAP_CACHE_INDEX,
- STRONG_MAP_CACHE_INDEX,
- TO_LENGTH_FUN_INDEX,
- NATIVES_UTILS_OBJECT_INDEX,
- EXTRAS_EXPORTS_OBJECT_INDEX,
+#define NATIVE_CONTEXT_SLOT(index, type, name) index,
+ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_SLOT)
+#undef NATIVE_CONTEXT_SLOT
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
@@ -475,7 +354,11 @@ class Context: public FixedArray {
// Total number of slots.
NATIVE_CONTEXT_SLOTS,
- FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST
+ FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST,
+
+ MIN_CONTEXT_SLOTS = GLOBAL_PROXY_INDEX,
+ // This slot holds the thrown value in catch contexts.
+ THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
};
// Direct slot access.
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index ae87dc4d31..42d617d017 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -18,6 +18,7 @@
#include "src/base/platform/platform.h"
#include "src/conversions.h"
#include "src/double.h"
+#include "src/objects-inl.h"
#include "src/scanner.h"
#include "src/strtod.h"
@@ -97,6 +98,73 @@ int32_t DoubleToInt32(double x) {
}
+bool IsSmiDouble(double value) {
+ return !IsMinusZero(value) && value >= Smi::kMinValue &&
+ value <= Smi::kMaxValue && value == FastI2D(FastD2I(value));
+}
+
+
+bool IsInt32Double(double value) {
+ return !IsMinusZero(value) && value >= kMinInt && value <= kMaxInt &&
+ value == FastI2D(FastD2I(value));
+}
+
+
+bool IsUint32Double(double value) {
+ return !IsMinusZero(value) && value >= 0 && value <= kMaxUInt32 &&
+ value == FastUI2D(FastD2UI(value));
+}
+
+
+int32_t NumberToInt32(Object* number) {
+ if (number->IsSmi()) return Smi::cast(number)->value();
+ return DoubleToInt32(number->Number());
+}
+
+
+uint32_t NumberToUint32(Object* number) {
+ if (number->IsSmi()) return Smi::cast(number)->value();
+ return DoubleToUint32(number->Number());
+}
+
+
+bool TryNumberToSize(Isolate* isolate, Object* number, size_t* result) {
+ SealHandleScope shs(isolate);
+ if (number->IsSmi()) {
+ int value = Smi::cast(number)->value();
+ DCHECK(static_cast<unsigned>(Smi::kMaxValue) <=
+ std::numeric_limits<size_t>::max());
+ if (value >= 0) {
+ *result = static_cast<size_t>(value);
+ return true;
+ }
+ return false;
+ } else {
+ DCHECK(number->IsHeapNumber());
+ double value = HeapNumber::cast(number)->value();
+ if (value >= 0 && value <= std::numeric_limits<size_t>::max()) {
+ *result = static_cast<size_t>(value);
+ return true;
+ } else {
+ return false;
+ }
+ }
+}
+
+
+size_t NumberToSize(Isolate* isolate, Object* number) {
+ size_t result = 0;
+ bool is_valid = TryNumberToSize(isolate, number, &result);
+ CHECK(is_valid);
+ return result;
+}
+
+
+uint32_t DoubleToUint32(double x) {
+ return static_cast<uint32_t>(DoubleToInt32(x));
+}
+
+
template <class Iterator, class EndMark>
bool SubStringEquals(Iterator* current,
EndMark end,
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 1413bc2eaa..9b6d83b6bb 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -9,7 +9,6 @@
#include "src/base/logging.h"
#include "src/handles.h"
-#include "src/objects.h"
#include "src/utils.h"
namespace v8 {
@@ -90,9 +89,7 @@ inline int32_t DoubleToInt32(double x);
// This function should match the exact semantics of ECMA-262 9.6.
-inline uint32_t DoubleToUint32(double x) {
- return static_cast<uint32_t>(DoubleToInt32(x));
-}
+inline uint32_t DoubleToUint32(double x);
// Enumeration for allowing octals and ignoring junk when converting
@@ -157,88 +154,41 @@ static inline bool IsMinusZero(double value) {
}
-static inline bool IsSmiDouble(double value) {
- return !IsMinusZero(value) && value >= Smi::kMinValue &&
- value <= Smi::kMaxValue && value == FastI2D(FastD2I(value));
-}
+inline bool IsSmiDouble(double value);
// Integer32 is an integer that can be represented as a signed 32-bit
// integer. It has to be in the range [-2^31, 2^31 - 1].
// We also have to check for negative 0 as it is not an Integer32.
-static inline bool IsInt32Double(double value) {
- return !IsMinusZero(value) &&
- value >= kMinInt &&
- value <= kMaxInt &&
- value == FastI2D(FastD2I(value));
-}
+inline bool IsInt32Double(double value);
// UInteger32 is an integer that can be represented as an unsigned 32-bit
// integer. It has to be in the range [0, 2^32 - 1].
// We also have to check for negative 0 as it is not a UInteger32.
-static inline bool IsUint32Double(double value) {
- return !IsMinusZero(value) &&
- value >= 0 &&
- value <= kMaxUInt32 &&
- value == FastUI2D(FastD2UI(value));
-}
+inline bool IsUint32Double(double value);
// Convert from Number object to C integer.
-inline int32_t NumberToInt32(Object* number) {
- if (number->IsSmi()) return Smi::cast(number)->value();
- return DoubleToInt32(number->Number());
-}
-
-
-inline uint32_t NumberToUint32(Object* number) {
- if (number->IsSmi()) return Smi::cast(number)->value();
- return DoubleToUint32(number->Number());
-}
+inline int32_t NumberToInt32(Object* number);
+inline uint32_t NumberToUint32(Object* number);
double StringToDouble(UnicodeCache* unicode_cache, Handle<String> string,
int flags, double empty_string_val = 0.0);
-inline bool TryNumberToSize(Isolate* isolate,
- Object* number, size_t* result) {
- SealHandleScope shs(isolate);
- if (number->IsSmi()) {
- int value = Smi::cast(number)->value();
- DCHECK(static_cast<unsigned>(Smi::kMaxValue)
- <= std::numeric_limits<size_t>::max());
- if (value >= 0) {
- *result = static_cast<size_t>(value);
- return true;
- }
- return false;
- } else {
- DCHECK(number->IsHeapNumber());
- double value = HeapNumber::cast(number)->value();
- if (value >= 0 &&
- value <= std::numeric_limits<size_t>::max()) {
- *result = static_cast<size_t>(value);
- return true;
- } else {
- return false;
- }
- }
-}
+inline bool TryNumberToSize(Isolate* isolate, Object* number, size_t* result);
+
// Converts a number into size_t.
-inline size_t NumberToSize(Isolate* isolate,
- Object* number) {
- size_t result = 0;
- bool is_valid = TryNumberToSize(isolate, number, &result);
- CHECK(is_valid);
- return result;
-}
+inline size_t NumberToSize(Isolate* isolate, Object* number);
// returns DoubleToString(StringToDouble(string)) == string
bool IsSpecialIndex(UnicodeCache* unicode_cache, String* string);
-} } // namespace v8::internal
+
+} // namespace internal
+} // namespace v8
#endif // V8_CONVERSIONS_H_
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 80839b96d8..db0c70a8f4 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -658,7 +658,6 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates) \
SC(array_function_runtime, V8.ArrayFunctionRuntime) \
SC(array_function_native, V8.ArrayFunctionNative) \
- SC(for_in, V8.ForIn) \
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(fast_new_closure_total, V8.FastNewClosureTotal) \
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index f48499e5f0..638f915138 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -33,6 +33,9 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
last_processed_code_event_id_(0) {}
+ProfilerEventsProcessor::~ProfilerEventsProcessor() {}
+
+
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
event.generic.order = ++last_code_event_id_;
events_buffer_.Enqueue(event);
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 7d8669bf61..7619a1ef6d 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -11,7 +11,7 @@
#include "src/circular-queue.h"
#include "src/compiler.h"
#include "src/sampler.h"
-#include "src/unbound-queue-inl.h"
+#include "src/unbound-queue.h"
namespace v8 {
namespace internal {
@@ -129,7 +129,7 @@ class ProfilerEventsProcessor : public base::Thread {
ProfilerEventsProcessor(ProfileGenerator* generator,
Sampler* sampler,
base::TimeDelta period);
- virtual ~ProfilerEventsProcessor() {}
+ virtual ~ProfilerEventsProcessor();
// Thread control.
virtual void Run();
diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc
deleted file mode 100644
index a356bd4b4e..0000000000
--- a/deps/v8/src/d8-debug.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/d8.h"
-#include "src/d8-debug.h"
-
-namespace v8 {
-
-void PrintPrompt(bool is_running) {
- const char* prompt = is_running? "> " : "dbg> ";
- printf("%s", prompt);
- fflush(stdout);
-}
-
-
-void HandleDebugEvent(const Debug::EventDetails& event_details) {
- // TODO(svenpanne) There should be a way to retrieve this in the callback.
- Isolate* isolate = Isolate::GetCurrent();
- HandleScope scope(isolate);
-
- DebugEvent event = event_details.GetEvent();
- // Check for handled event.
- if (event != Break && event != Exception && event != AfterCompile) {
- return;
- }
-
- TryCatch try_catch(isolate);
-
- // Get the toJSONProtocol function on the event and get the JSON format.
- Local<String> to_json_fun_name =
- String::NewFromUtf8(isolate, "toJSONProtocol");
- Handle<Object> event_data = event_details.GetEventData();
- Local<Function> to_json_fun =
- Local<Function>::Cast(event_data->Get(to_json_fun_name));
- Local<Value> event_json = to_json_fun->Call(event_data, 0, NULL);
- if (try_catch.HasCaught()) {
- Shell::ReportException(isolate, &try_catch);
- return;
- }
-
- // Print the event details.
- Handle<Object> details =
- Shell::DebugMessageDetails(isolate, Handle<String>::Cast(event_json));
- if (try_catch.HasCaught()) {
- Shell::ReportException(isolate, &try_catch);
- return;
- }
- String::Utf8Value str(details->Get(String::NewFromUtf8(isolate, "text")));
- if (str.length() == 0) {
- // Empty string is used to signal not to process this event.
- return;
- }
- printf("%s\n", *str);
-
- // Get the debug command processor.
- Local<String> fun_name =
- String::NewFromUtf8(isolate, "debugCommandProcessor");
- Handle<Object> exec_state = event_details.GetExecutionState();
- Local<Function> fun = Local<Function>::Cast(exec_state->Get(fun_name));
- Local<Object> cmd_processor =
- Local<Object>::Cast(fun->Call(exec_state, 0, NULL));
- if (try_catch.HasCaught()) {
- Shell::ReportException(isolate, &try_catch);
- return;
- }
-
- static const int kBufferSize = 256;
- bool running = false;
- while (!running) {
- char command[kBufferSize];
- PrintPrompt(running);
- char* str = fgets(command, kBufferSize, stdin);
- if (str == NULL) break;
-
- // Ignore empty commands.
- if (strlen(command) == 0) continue;
-
- TryCatch try_catch(isolate);
-
- // Convert the debugger command to a JSON debugger request.
- Handle<Value> request = Shell::DebugCommandToJSONRequest(
- isolate, String::NewFromUtf8(isolate, command));
- if (try_catch.HasCaught()) {
- Shell::ReportException(isolate, &try_catch);
- continue;
- }
-
- // If undefined is returned the command was handled internally and there is
- // no JSON to send.
- if (request->IsUndefined()) {
- continue;
- }
-
- Handle<String> fun_name;
- Handle<Function> fun;
- // All the functions used below take one argument.
- static const int kArgc = 1;
- Handle<Value> args[kArgc];
-
- // Invoke the JavaScript to convert the debug command line to a JSON
- // request, invoke the JSON request and convert the JSON respose to a text
- // representation.
- fun_name = String::NewFromUtf8(isolate, "processDebugRequest");
- fun = Handle<Function>::Cast(cmd_processor->Get(fun_name));
- args[0] = request;
- Handle<Value> response_val = fun->Call(cmd_processor, kArgc, args);
- if (try_catch.HasCaught()) {
- Shell::ReportException(isolate, &try_catch);
- continue;
- }
- Handle<String> response = Handle<String>::Cast(response_val);
-
- // Convert the debugger response into text details and the running state.
- Handle<Object> response_details =
- Shell::DebugMessageDetails(isolate, response);
- if (try_catch.HasCaught()) {
- Shell::ReportException(isolate, &try_catch);
- continue;
- }
- String::Utf8Value text_str(
- response_details->Get(String::NewFromUtf8(isolate, "text")));
- if (text_str.length() > 0) {
- printf("%s\n", *text_str);
- }
- running = response_details->Get(String::NewFromUtf8(isolate, "running"))
- ->ToBoolean(isolate)
- ->Value();
- }
-}
-
-} // namespace v8
diff --git a/deps/v8/src/d8-debug.h b/deps/v8/src/d8-debug.h
deleted file mode 100644
index 1a693cc86d..0000000000
--- a/deps/v8/src/d8-debug.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_D8_DEBUG_H_
-#define V8_D8_DEBUG_H_
-
-
-#include "src/d8.h"
-#include "src/debug.h"
-
-
-namespace v8 {
-
-void HandleDebugEvent(const Debug::EventDetails& event_details);
-
-} // namespace v8
-
-
-#endif // V8_D8_DEBUG_H_
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 3bca14f530..36d83b53cf 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -170,12 +170,14 @@ class ExecArgs {
ExecArgs() {
exec_args_[0] = NULL;
}
- bool Init(Isolate* isolate, Handle<Value> arg0, Handle<Array> command_args) {
+ bool Init(Isolate* isolate, Local<Value> arg0, Local<Array> command_args) {
String::Utf8Value prog(arg0);
if (*prog == NULL) {
const char* message =
"os.system(): String conversion of program name failed";
- isolate->ThrowException(String::NewFromUtf8(isolate, message));
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate, message, NewStringType::kNormal)
+ .ToLocalChecked());
return false;
}
int len = prog.length() + 3;
@@ -184,13 +186,17 @@ class ExecArgs {
exec_args_[0] = c_arg;
int i = 1;
for (unsigned j = 0; j < command_args->Length(); i++, j++) {
- Handle<Value> arg(command_args->Get(Integer::New(isolate, j)));
+ Local<Value> arg(
+ command_args->Get(isolate->GetCurrentContext(),
+ Integer::New(isolate, j)).ToLocalChecked());
String::Utf8Value utf8_arg(arg);
if (*utf8_arg == NULL) {
exec_args_[i] = NULL; // Consistent state for destructor.
const char* message =
"os.system(): String conversion of argument failed.";
- isolate->ThrowException(String::NewFromUtf8(isolate, message));
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate, message, NewStringType::kNormal)
+ .ToLocalChecked());
return false;
}
int len = utf8_arg.length() + 1;
@@ -225,19 +231,27 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
int* total_timeout) {
if (args.Length() > 3) {
if (args[3]->IsNumber()) {
- *total_timeout = args[3]->Int32Value();
+ *total_timeout = args[3]
+ ->Int32Value(args.GetIsolate()->GetCurrentContext())
+ .FromJust();
} else {
- args.GetIsolate()->ThrowException(String::NewFromUtf8(
- args.GetIsolate(), "system: Argument 4 must be a number"));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(),
+ "system: Argument 4 must be a number",
+ NewStringType::kNormal).ToLocalChecked());
return false;
}
}
if (args.Length() > 2) {
if (args[2]->IsNumber()) {
- *read_timeout = args[2]->Int32Value();
+ *read_timeout = args[2]
+ ->Int32Value(args.GetIsolate()->GetCurrentContext())
+ .FromJust();
} else {
- args.GetIsolate()->ThrowException(String::NewFromUtf8(
- args.GetIsolate(), "system: Argument 3 must be a number"));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(),
+ "system: Argument 3 must be a number",
+ NewStringType::kNormal).ToLocalChecked());
return false;
}
}
@@ -282,7 +296,9 @@ static bool ChildLaunchedOK(Isolate* isolate, int* exec_error_fds) {
bytes_read = read(exec_error_fds[kReadFD], &err, sizeof(err));
} while (bytes_read == -1 && errno == EINTR);
if (bytes_read != 0) {
- isolate->ThrowException(String::NewFromUtf8(isolate, strerror(err)));
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate, strerror(err), NewStringType::kNormal)
+ .ToLocalChecked());
return false;
}
return true;
@@ -291,12 +307,10 @@ static bool ChildLaunchedOK(Isolate* isolate, int* exec_error_fds) {
// Accumulates the output from the child in a string handle. Returns true if it
// succeeded or false if an exception was thrown.
-static Handle<Value> GetStdout(Isolate* isolate,
- int child_fd,
- const struct timeval& start_time,
- int read_timeout,
- int total_timeout) {
- Handle<String> accumulator = String::Empty(isolate);
+static Local<Value> GetStdout(Isolate* isolate, int child_fd,
+ const struct timeval& start_time,
+ int read_timeout, int total_timeout) {
+ Local<String> accumulator = String::Empty(isolate);
int fullness = 0;
static const int kStdoutReadBufferSize = 4096;
@@ -304,7 +318,8 @@ static Handle<Value> GetStdout(Isolate* isolate,
if (fcntl(child_fd, F_SETFL, O_NONBLOCK) != 0) {
return isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(errno)));
+ String::NewFromUtf8(isolate, strerror(errno), NewStringType::kNormal)
+ .ToLocalChecked());
}
int bytes_read;
@@ -319,7 +334,8 @@ static Handle<Value> GetStdout(Isolate* isolate,
start_time) ||
(TimeIsOut(start_time, total_timeout))) {
return isolate->ThrowException(
- String::NewFromUtf8(isolate, "Timed out waiting for output"));
+ String::NewFromUtf8(isolate, "Timed out waiting for output",
+ NewStringType::kNormal).ToLocalChecked());
}
continue;
} else if (errno == EINTR) {
@@ -332,8 +348,9 @@ static Handle<Value> GetStdout(Isolate* isolate,
int length = bytes_read == 0 ?
bytes_read + fullness :
LengthWithoutIncompleteUtf8(buffer, bytes_read + fullness);
- Handle<String> addition =
- String::NewFromUtf8(isolate, buffer, String::kNormalString, length);
+ Local<String> addition =
+ String::NewFromUtf8(isolate, buffer, NewStringType::kNormal, length)
+ .ToLocalChecked();
accumulator = String::Concat(accumulator, addition);
fullness = bytes_read + fullness - length;
memcpy(buffer, buffer + length, fullness);
@@ -380,8 +397,10 @@ static bool WaitForChild(Isolate* isolate,
if (useconds < 1000000) useconds <<= 1;
if ((read_timeout != -1 && useconds / 1000 > read_timeout) ||
(TimeIsOut(start_time, total_timeout))) {
- isolate->ThrowException(String::NewFromUtf8(
- isolate, "Timed out waiting for process to terminate"));
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate,
+ "Timed out waiting for process to terminate",
+ NewStringType::kNormal).ToLocalChecked());
kill(pid, SIGINT);
return false;
}
@@ -392,7 +411,9 @@ static bool WaitForChild(Isolate* isolate,
sizeof(message),
"Child killed by signal %d",
child_info.si_status);
- isolate->ThrowException(String::NewFromUtf8(isolate, message));
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate, message, NewStringType::kNormal)
+ .ToLocalChecked());
return false;
}
if (child_info.si_code == CLD_EXITED && child_info.si_status != 0) {
@@ -401,7 +422,9 @@ static bool WaitForChild(Isolate* isolate,
sizeof(message),
"Child exited with status %d",
child_info.si_status);
- isolate->ThrowException(String::NewFromUtf8(isolate, message));
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate, message, NewStringType::kNormal)
+ .ToLocalChecked());
return false;
}
@@ -416,7 +439,9 @@ static bool WaitForChild(Isolate* isolate,
sizeof(message),
"Child killed by signal %d",
WTERMSIG(child_status));
- isolate->ThrowException(String::NewFromUtf8(isolate, message));
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate, message, NewStringType::kNormal)
+ .ToLocalChecked());
return false;
}
if (WEXITSTATUS(child_status) != 0) {
@@ -426,7 +451,9 @@ static bool WaitForChild(Isolate* isolate,
sizeof(message),
"Child exited with status %d",
exit_status);
- isolate->ThrowException(String::NewFromUtf8(isolate, message));
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate, message, NewStringType::kNormal)
+ .ToLocalChecked());
return false;
}
@@ -442,25 +469,29 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
int read_timeout = -1;
int total_timeout = -1;
if (!GetTimeouts(args, &read_timeout, &total_timeout)) return;
- Handle<Array> command_args;
+ Local<Array> command_args;
if (args.Length() > 1) {
if (!args[1]->IsArray()) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8(
- args.GetIsolate(), "system: Argument 2 must be an array"));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(),
+ "system: Argument 2 must be an array",
+ NewStringType::kNormal).ToLocalChecked());
return;
}
- command_args = Handle<Array>::Cast(args[1]);
+ command_args = Local<Array>::Cast(args[1]);
} else {
command_args = Array::New(args.GetIsolate(), 0);
}
if (command_args->Length() > ExecArgs::kMaxArgs) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8(
- args.GetIsolate(), "Too many arguments to system()"));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), "Too many arguments to system()",
+ NewStringType::kNormal).ToLocalChecked());
return;
}
if (args.Length() < 1) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8(
- args.GetIsolate(), "Too few arguments to system()"));
+ args.GetIsolate()->ThrowException(
+ String::NewFromUtf8(args.GetIsolate(), "Too few arguments to system()",
+ NewStringType::kNormal).ToLocalChecked());
return;
}
@@ -476,12 +507,14 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (pipe(exec_error_fds) != 0) {
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), "pipe syscall failed."));
+ String::NewFromUtf8(args.GetIsolate(), "pipe syscall failed.",
+ NewStringType::kNormal).ToLocalChecked());
return;
}
if (pipe(stdout_fds) != 0) {
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), "pipe syscall failed."));
+ String::NewFromUtf8(args.GetIsolate(), "pipe syscall failed.",
+ NewStringType::kNormal).ToLocalChecked());
return;
}
@@ -500,11 +533,8 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (!ChildLaunchedOK(args.GetIsolate(), exec_error_fds)) return;
- Handle<Value> accumulator = GetStdout(args.GetIsolate(),
- stdout_fds[kReadFD],
- start_time,
- read_timeout,
- total_timeout);
+ Local<Value> accumulator = GetStdout(args.GetIsolate(), stdout_fds[kReadFD],
+ start_time, read_timeout, total_timeout);
if (accumulator->IsUndefined()) {
kill(pid, SIGINT); // On timeout, kill the subprocess.
args.GetReturnValue().Set(accumulator);
@@ -528,19 +558,22 @@ void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "chdir() takes one argument";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.chdir(): String conversion of argument failed.";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
if (chdir(*directory) != 0) {
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), strerror(errno)));
+ String::NewFromUtf8(args.GetIsolate(), strerror(errno),
+ NewStringType::kNormal).ToLocalChecked());
return;
}
}
@@ -550,7 +583,8 @@ void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "umask() takes one argument";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
if (args[0]->IsNumber()) {
@@ -558,14 +592,16 @@ void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
// PNaCL has no support for umask.
int previous = 0;
#else
- int previous = umask(args[0]->Int32Value());
+ int previous = umask(
+ args[0]->Int32Value(args.GetIsolate()->GetCurrentContext()).FromJust());
#endif
args.GetReturnValue().Set(previous);
return;
} else {
const char* message = "umask() argument must be numeric";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
}
@@ -575,11 +611,15 @@ static bool CheckItsADirectory(Isolate* isolate, char* directory) {
struct stat stat_buf;
int stat_result = stat(directory, &stat_buf);
if (stat_result != 0) {
- isolate->ThrowException(String::NewFromUtf8(isolate, strerror(errno)));
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate, strerror(errno), NewStringType::kNormal)
+ .ToLocalChecked());
return false;
}
if ((stat_buf.st_mode & S_IFDIR) != 0) return true;
- isolate->ThrowException(String::NewFromUtf8(isolate, strerror(EEXIST)));
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate, strerror(EEXIST), NewStringType::kNormal)
+ .ToLocalChecked());
return false;
}
@@ -594,7 +634,9 @@ static bool mkdirp(Isolate* isolate, char* directory, mode_t mask) {
} else if (errno == ENOENT) { // Intermediate path element is missing.
char* last_slash = strrchr(directory, '/');
if (last_slash == NULL) {
- isolate->ThrowException(String::NewFromUtf8(isolate, strerror(errno)));
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate, strerror(errno), NewStringType::kNormal)
+ .ToLocalChecked());
return false;
}
*last_slash = 0;
@@ -605,10 +647,14 @@ static bool mkdirp(Isolate* isolate, char* directory, mode_t mask) {
if (errno == EEXIST) {
return CheckItsADirectory(isolate, directory);
}
- isolate->ThrowException(String::NewFromUtf8(isolate, strerror(errno)));
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate, strerror(errno), NewStringType::kNormal)
+ .ToLocalChecked());
return false;
} else {
- isolate->ThrowException(String::NewFromUtf8(isolate, strerror(errno)));
+ isolate->ThrowException(
+ String::NewFromUtf8(isolate, strerror(errno), NewStringType::kNormal)
+ .ToLocalChecked());
return false;
}
}
@@ -618,24 +664,29 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
mode_t mask = 0777;
if (args.Length() == 2) {
if (args[1]->IsNumber()) {
- mask = args[1]->Int32Value();
+ mask = args[1]
+ ->Int32Value(args.GetIsolate()->GetCurrentContext())
+ .FromJust();
} else {
const char* message = "mkdirp() second argument must be numeric";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message,
+ NewStringType::kNormal).ToLocalChecked());
return;
}
} else if (args.Length() != 1) {
const char* message = "mkdirp() takes one or two arguments";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.mkdirp(): String conversion of argument failed.";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
mkdirp(args.GetIsolate(), *directory, mask);
@@ -646,14 +697,16 @@ void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "rmdir() takes one or two arguments";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.rmdir(): String conversion of argument failed.";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
rmdir(*directory);
@@ -664,7 +717,8 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2) {
const char* message = "setenv() takes two arguments";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
String::Utf8Value var(args[0]);
@@ -673,14 +727,16 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
if (*value == NULL) {
const char* message =
"os.setenv(): String conversion of variable contents failed.";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
setenv(*var, *value, 1);
@@ -691,7 +747,8 @@ void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "unsetenv() takes one argument";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
String::Utf8Value var(args[0]);
@@ -699,27 +756,35 @@ void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), message));
+ String::NewFromUtf8(args.GetIsolate(), message, NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
unsetenv(*var);
}
-void Shell::AddOSMethods(Isolate* isolate, Handle<ObjectTemplate> os_templ) {
- os_templ->Set(String::NewFromUtf8(isolate, "system"),
+void Shell::AddOSMethods(Isolate* isolate, Local<ObjectTemplate> os_templ) {
+ os_templ->Set(String::NewFromUtf8(isolate, "system", NewStringType::kNormal)
+ .ToLocalChecked(),
FunctionTemplate::New(isolate, System));
- os_templ->Set(String::NewFromUtf8(isolate, "chdir"),
+ os_templ->Set(String::NewFromUtf8(isolate, "chdir", NewStringType::kNormal)
+ .ToLocalChecked(),
FunctionTemplate::New(isolate, ChangeDirectory));
- os_templ->Set(String::NewFromUtf8(isolate, "setenv"),
+ os_templ->Set(String::NewFromUtf8(isolate, "setenv", NewStringType::kNormal)
+ .ToLocalChecked(),
FunctionTemplate::New(isolate, SetEnvironment));
- os_templ->Set(String::NewFromUtf8(isolate, "unsetenv"),
+ os_templ->Set(String::NewFromUtf8(isolate, "unsetenv", NewStringType::kNormal)
+ .ToLocalChecked(),
FunctionTemplate::New(isolate, UnsetEnvironment));
- os_templ->Set(String::NewFromUtf8(isolate, "umask"),
+ os_templ->Set(String::NewFromUtf8(isolate, "umask", NewStringType::kNormal)
+ .ToLocalChecked(),
FunctionTemplate::New(isolate, SetUMask));
- os_templ->Set(String::NewFromUtf8(isolate, "mkdirp"),
+ os_templ->Set(String::NewFromUtf8(isolate, "mkdirp", NewStringType::kNormal)
+ .ToLocalChecked(),
FunctionTemplate::New(isolate, MakeDirectory));
- os_templ->Set(String::NewFromUtf8(isolate, "rmdir"),
+ os_templ->Set(String::NewFromUtf8(isolate, "rmdir", NewStringType::kNormal)
+ .ToLocalChecked(),
FunctionTemplate::New(isolate, RemoveDirectory));
}
diff --git a/deps/v8/src/d8-readline.cc b/deps/v8/src/d8-readline.cc
deleted file mode 100644
index 39c93d35de..0000000000
--- a/deps/v8/src/d8-readline.cc
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stdio.h> // NOLINT
-#include <string.h> // NOLINT
-#include <readline/readline.h> // NOLINT
-#include <readline/history.h> // NOLINT
-
-// The readline includes leaves RETURN defined which breaks V8 compilation.
-#undef RETURN
-
-#include "src/d8.h"
-
-// There are incompatibilities between different versions and different
-// implementations of readline. This smooths out one known incompatibility.
-#if RL_READLINE_VERSION >= 0x0500
-#define completion_matches rl_completion_matches
-#endif
-
-
-namespace v8 {
-
-
-class ReadLineEditor: public LineEditor {
- public:
- ReadLineEditor() : LineEditor(LineEditor::READLINE, "readline") { }
- virtual Handle<String> Prompt(const char* prompt);
- virtual bool Open(Isolate* isolate);
- virtual bool Close();
- virtual void AddHistory(const char* str);
-
- static const char* kHistoryFileName;
- static const int kMaxHistoryEntries;
-
- private:
-#ifndef V8_SHARED
- static char** AttemptedCompletion(const char* text, int start, int end);
- static char* CompletionGenerator(const char* text, int state);
-#endif // V8_SHARED
- static char kWordBreakCharacters[];
-
- Isolate* isolate_;
-};
-
-
-static ReadLineEditor read_line_editor;
-char ReadLineEditor::kWordBreakCharacters[] = {' ', '\t', '\n', '"',
- '\\', '\'', '`', '@', '.', '>', '<', '=', ';', '|', '&', '{', '(',
- '\0'};
-
-
-const char* ReadLineEditor::kHistoryFileName = ".d8_history";
-const int ReadLineEditor::kMaxHistoryEntries = 1000;
-
-
-bool ReadLineEditor::Open(Isolate* isolate) {
- isolate_ = isolate;
-
- rl_initialize();
-
-#ifdef V8_SHARED
- // Don't do completion on shared library mode
- // http://cnswww.cns.cwru.edu/php/chet/readline/readline.html#SEC24
- rl_bind_key('\t', rl_insert);
-#else
- rl_attempted_completion_function = AttemptedCompletion;
-#endif // V8_SHARED
-
- rl_completer_word_break_characters = kWordBreakCharacters;
- rl_bind_key('\t', rl_complete);
- using_history();
- stifle_history(kMaxHistoryEntries);
- return read_history(kHistoryFileName) == 0;
-}
-
-
-bool ReadLineEditor::Close() {
- return write_history(kHistoryFileName) == 0;
-}
-
-
-Handle<String> ReadLineEditor::Prompt(const char* prompt) {
- char* result = NULL;
- result = readline(prompt);
- if (result == NULL) return Handle<String>();
- AddHistory(result);
- return String::NewFromUtf8(isolate_, result);
-}
-
-
-void ReadLineEditor::AddHistory(const char* str) {
- // Do not record empty input.
- if (strlen(str) == 0) return;
- // Remove duplicate history entry.
- history_set_pos(history_length-1);
- if (current_history()) {
- do {
- if (strcmp(current_history()->line, str) == 0) {
- remove_history(where_history());
- break;
- }
- } while (previous_history());
- }
- add_history(str);
-}
-
-
-#ifndef V8_SHARED
-char** ReadLineEditor::AttemptedCompletion(const char* text,
- int start,
- int end) {
- char** result = completion_matches(text, CompletionGenerator);
- rl_attempted_completion_over = true;
- return result;
-}
-
-
-char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
- static unsigned current_index;
- static Persistent<Array> current_completions;
- Isolate* isolate = read_line_editor.isolate_;
- HandleScope scope(isolate);
- Handle<Array> completions;
- if (state == 0) {
- Local<String> full_text = String::NewFromUtf8(isolate,
- rl_line_buffer,
- String::kNormalString,
- rl_point);
- completions = Shell::GetCompletions(isolate,
- String::NewFromUtf8(isolate, text),
- full_text);
- current_completions.Reset(isolate, completions);
- current_index = 0;
- } else {
- completions = Local<Array>::New(isolate, current_completions);
- }
- if (current_index < completions->Length()) {
- Handle<Integer> index = Integer::New(isolate, current_index);
- Handle<Value> str_obj = completions->Get(index);
- current_index++;
- String::Utf8Value str(str_obj);
- return strdup(*str);
- } else {
- current_completions.Reset();
- return NULL;
- }
-}
-#endif // V8_SHARED
-
-
-} // namespace v8
diff --git a/deps/v8/src/d8-windows.cc b/deps/v8/src/d8-windows.cc
index 06c0a4e87d..ba89c4156f 100644
--- a/deps/v8/src/d8-windows.cc
+++ b/deps/v8/src/d8-windows.cc
@@ -8,8 +8,7 @@
namespace v8 {
-void Shell::AddOSMethods(Isolate* isolate, Handle<ObjectTemplate> os_templ) {
-}
+void Shell::AddOSMethods(Isolate* isolate, Local<ObjectTemplate> os_templ) {}
} // namespace v8
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 45bf33167f..58b59c890f 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -19,6 +19,7 @@
#ifndef V8_SHARED
#include <algorithm>
+#include <vector>
#endif // !V8_SHARED
#ifdef V8_SHARED
@@ -43,13 +44,15 @@
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
#include "src/basic-block-profiler.h"
-#include "src/d8-debug.h"
-#include "src/debug.h"
#include "src/snapshot/natives.h"
#include "src/utils.h"
#include "src/v8.h"
#endif // !V8_SHARED
+#if defined(V8_WASM)
+#include "src/wasm/wasm-js.h"
+#endif
+
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
#else
@@ -72,6 +75,9 @@ namespace v8 {
namespace {
const int MB = 1024 * 1024;
+#ifndef V8_SHARED
+const int kMaxWorkers = 50;
+#endif
class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
@@ -102,8 +108,15 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
v8::Platform* g_platform = NULL;
+static Local<Value> Throw(Isolate* isolate, const char* message) {
+ return isolate->ThrowException(
+ String::NewFromUtf8(isolate, message, NewStringType::kNormal)
+ .ToLocalChecked());
+}
+
+
#ifndef V8_SHARED
-bool FindInObjectList(Handle<Object> object, const Shell::ObjectList& list) {
+bool FindInObjectList(Local<Object> object, const Shell::ObjectList& list) {
for (int i = 0; i < list.length(); ++i) {
if (list[i]->StrictEquals(object)) {
return true;
@@ -111,16 +124,27 @@ bool FindInObjectList(Handle<Object> object, const Shell::ObjectList& list) {
}
return false;
}
-#endif // !V8_SHARED
-} // namespace
+Worker* GetWorkerFromInternalField(Isolate* isolate, Local<Object> object) {
+ if (object->InternalFieldCount() != 1) {
+ Throw(isolate, "this is not a Worker");
+ return NULL;
+ }
+ Worker* worker =
+ static_cast<Worker*>(object->GetAlignedPointerFromInternalField(0));
+ if (worker == NULL) {
+ Throw(isolate, "Worker is defunct because main thread is terminating");
+ return NULL;
+ }
-static Handle<Value> Throw(Isolate* isolate, const char* message) {
- return isolate->ThrowException(String::NewFromUtf8(isolate, message));
+ return worker;
}
+#endif // !V8_SHARED
+
+} // namespace
class PerIsolateData {
@@ -153,64 +177,34 @@ class PerIsolateData {
int realm_count_;
int realm_current_;
int realm_switch_;
- Persistent<Context>* realms_;
- Persistent<Value> realm_shared_;
+ Global<Context>* realms_;
+ Global<Value> realm_shared_;
int RealmIndexOrThrow(const v8::FunctionCallbackInfo<v8::Value>& args,
int arg_offset);
- int RealmFind(Handle<Context> context);
-};
-
-
-LineEditor *LineEditor::current_ = NULL;
-
-
-LineEditor::LineEditor(Type type, const char* name)
- : type_(type), name_(name) {
- if (current_ == NULL || current_->type_ < type) current_ = this;
-}
-
-
-class DumbLineEditor: public LineEditor {
- public:
- explicit DumbLineEditor(Isolate* isolate)
- : LineEditor(LineEditor::DUMB, "dumb"), isolate_(isolate) { }
- virtual Handle<String> Prompt(const char* prompt);
- private:
- Isolate* isolate_;
+ int RealmFind(Local<Context> context);
};
-Handle<String> DumbLineEditor::Prompt(const char* prompt) {
- printf("%s", prompt);
-#if defined(__native_client__)
- // Native Client libc is used to being embedded in Chrome and
- // has trouble recognizing when to flush.
- fflush(stdout);
-#endif
- return Shell::ReadFromStdin(isolate_);
-}
-
-
#ifndef V8_SHARED
CounterMap* Shell::counter_map_;
base::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
-base::Mutex Shell::context_mutex_;
+base::LazyMutex Shell::context_mutex_;
const base::TimeTicks Shell::kInitialTicks =
base::TimeTicks::HighResolutionNow();
-Persistent<Context> Shell::utility_context_;
-base::Mutex Shell::workers_mutex_;
+Global<Context> Shell::utility_context_;
+base::LazyMutex Shell::workers_mutex_;
bool Shell::allow_new_workers_ = true;
i::List<Worker*> Shell::workers_;
i::List<SharedArrayBuffer::Contents> Shell::externalized_shared_contents_;
#endif // !V8_SHARED
-Persistent<Context> Shell::evaluation_context_;
+Global<Context> Shell::evaluation_context_;
ArrayBuffer::Allocator* Shell::array_buffer_allocator;
ShellOptions Shell::options;
-const char* Shell::kPrompt = "d8> ";
+base::OnceType Shell::quit_once_ = V8_ONCE_INIT;
#ifndef V8_SHARED
bool CounterMap::Match(void* key1, void* key2) {
@@ -249,19 +243,22 @@ ScriptCompiler::CachedData* CompileForCachedData(
Isolate::Scope isolate_scope(temp_isolate);
HandleScope handle_scope(temp_isolate);
Context::Scope context_scope(Context::New(temp_isolate));
- Local<String> source_copy = v8::String::NewFromTwoByte(
- temp_isolate, source_buffer, v8::String::kNormalString, source_length);
+ Local<String> source_copy =
+ v8::String::NewFromTwoByte(temp_isolate, source_buffer,
+ v8::NewStringType::kNormal,
+ source_length).ToLocalChecked();
Local<Value> name_copy;
if (name_buffer) {
- name_copy = v8::String::NewFromTwoByte(
- temp_isolate, name_buffer, v8::String::kNormalString, name_length);
+ name_copy = v8::String::NewFromTwoByte(temp_isolate, name_buffer,
+ v8::NewStringType::kNormal,
+ name_length).ToLocalChecked();
} else {
name_copy = v8::Undefined(temp_isolate);
}
ScriptCompiler::Source script_source(source_copy, ScriptOrigin(name_copy));
- ScriptCompiler::CompileUnbound(temp_isolate, &script_source,
- compile_options);
- if (script_source.GetCachedData()) {
+ if (!ScriptCompiler::CompileUnboundScript(temp_isolate, &script_source,
+ compile_options).IsEmpty() &&
+ script_source.GetCachedData()) {
int length = script_source.GetCachedData()->length;
uint8_t* cache = new uint8_t[length];
memcpy(cache, script_source.GetCachedData()->data, length);
@@ -277,16 +274,17 @@ ScriptCompiler::CachedData* CompileForCachedData(
// Compile a string within the current v8 context.
-Local<Script> Shell::CompileString(
+MaybeLocal<Script> Shell::CompileString(
Isolate* isolate, Local<String> source, Local<Value> name,
ScriptCompiler::CompileOptions compile_options, SourceType source_type) {
+ Local<Context> context(isolate->GetCurrentContext());
ScriptOrigin origin(name);
if (compile_options == ScriptCompiler::kNoCompileOptions) {
ScriptCompiler::Source script_source(source, origin);
return source_type == SCRIPT
- ? ScriptCompiler::Compile(isolate, &script_source,
+ ? ScriptCompiler::Compile(context, &script_source,
compile_options)
- : ScriptCompiler::CompileModule(isolate, &script_source,
+ : ScriptCompiler::CompileModule(context, &script_source,
compile_options);
}
@@ -301,10 +299,10 @@ Local<Script> Shell::CompileString(
DCHECK(false); // A new compile option?
}
if (data == NULL) compile_options = ScriptCompiler::kNoCompileOptions;
- Local<Script> result =
+ MaybeLocal<Script> result =
source_type == SCRIPT
- ? ScriptCompiler::Compile(isolate, &cached_source, compile_options)
- : ScriptCompiler::CompileModule(isolate, &cached_source,
+ ? ScriptCompiler::Compile(context, &cached_source, compile_options)
+ : ScriptCompiler::CompileModule(context, &cached_source,
compile_options);
CHECK(data == NULL || !data->rejected);
return result;
@@ -312,44 +310,34 @@ Local<Script> Shell::CompileString(
// Executes a string within the current v8 context.
-bool Shell::ExecuteString(Isolate* isolate, Handle<String> source,
- Handle<Value> name, bool print_result,
+bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
+ Local<Value> name, bool print_result,
bool report_exceptions, SourceType source_type) {
-#ifndef V8_SHARED
- bool FLAG_debugger = i::FLAG_debugger;
-#else
- bool FLAG_debugger = false;
-#endif // !V8_SHARED
HandleScope handle_scope(isolate);
TryCatch try_catch(isolate);
- options.script_executed = true;
- if (FLAG_debugger) {
- // When debugging make exceptions appear to be uncaught.
- try_catch.SetVerbose(true);
- }
- Handle<Value> result;
+ MaybeLocal<Value> maybe_result;
{
PerIsolateData* data = PerIsolateData::Get(isolate);
Local<Context> realm =
Local<Context>::New(isolate, data->realms_[data->realm_current_]);
Context::Scope context_scope(realm);
- Handle<Script> script = Shell::CompileString(
- isolate, source, name, options.compile_options, source_type);
- if (script.IsEmpty()) {
+ Local<Script> script;
+ if (!Shell::CompileString(isolate, source, name, options.compile_options,
+ source_type).ToLocal(&script)) {
// Print errors that happened during compilation.
- if (report_exceptions && !FLAG_debugger)
- ReportException(isolate, &try_catch);
+ if (report_exceptions) ReportException(isolate, &try_catch);
return false;
}
- result = script->Run();
+ maybe_result = script->Run(realm);
+ EmptyMessageQueues(isolate);
data->realm_current_ = data->realm_switch_;
}
- if (result.IsEmpty()) {
+ Local<Value> result;
+ if (!maybe_result.ToLocal(&result)) {
DCHECK(try_catch.HasCaught());
// Print errors that happened during execution.
- if (report_exceptions && !FLAG_debugger)
- ReportException(isolate, &try_catch);
+ if (report_exceptions) ReportException(isolate, &try_catch);
return false;
}
DCHECK(!try_catch.HasCaught());
@@ -370,12 +358,19 @@ bool Shell::ExecuteString(Isolate* isolate, Handle<String> source,
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(context);
- Handle<Object> global = context->Global();
- Handle<Value> fun =
- global->Get(String::NewFromUtf8(isolate, "Stringify"));
- Handle<Value> argv[1] = {result};
- Handle<Value> s = Handle<Function>::Cast(fun)->Call(global, 1, argv);
- if (try_catch.HasCaught()) return true;
+ Local<Object> global = context->Global();
+ Local<Value> fun =
+ global->Get(context, String::NewFromUtf8(isolate, "Stringify",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked()).ToLocalChecked();
+ Local<Value> argv[1] = {result};
+ Local<Value> s;
+ if (!Local<Function>::Cast(fun)
+ ->Call(context, global, 1, argv)
+ .ToLocal(&s)) {
+ return true;
+ }
+ DCHECK(!try_catch.HasCaught());
v8::String::Utf8Value str(s);
fwrite(*str, sizeof(**str), str.length(), stdout);
printf("\n");
@@ -390,7 +385,7 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realm_count_ = 1;
data_->realm_current_ = 0;
data_->realm_switch_ = 0;
- data_->realms_ = new Persistent<Context>[1];
+ data_->realms_ = new Global<Context>[1];
data_->realms_[0].Reset(data_->isolate_,
data_->isolate_->GetEnteredContext());
}
@@ -406,7 +401,7 @@ PerIsolateData::RealmScope::~RealmScope() {
}
-int PerIsolateData::RealmFind(Handle<Context> context) {
+int PerIsolateData::RealmFind(Local<Context> context) {
for (int i = 0; i < realm_count_; ++i) {
if (realms_[i] == context) return i;
}
@@ -421,10 +416,10 @@ int PerIsolateData::RealmIndexOrThrow(
Throw(args.GetIsolate(), "Invalid argument");
return -1;
}
- int index = args[arg_offset]->Int32Value();
- if (index < 0 ||
- index >= realm_count_ ||
- realms_[index].IsEmpty()) {
+ int index = args[arg_offset]
+ ->Int32Value(args.GetIsolate()->GetCurrentContext())
+ .FromMaybe(-1);
+ if (index < 0 || index >= realm_count_ || realms_[index].IsEmpty()) {
Throw(args.GetIsolate(), "Invalid realm index");
return -1;
}
@@ -468,7 +463,10 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Invalid argument");
return;
}
- int index = data->RealmFind(args[0]->ToObject(isolate)->CreationContext());
+ int index = data->RealmFind(args[0]
+ ->ToObject(isolate->GetCurrentContext())
+ .ToLocalChecked()
+ ->CreationContext());
if (index == -1) return;
args.GetReturnValue().Set(index);
}
@@ -490,15 +488,15 @@ void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
TryCatch try_catch(isolate);
PerIsolateData* data = PerIsolateData::Get(isolate);
- Persistent<Context>* old_realms = data->realms_;
+ Global<Context>* old_realms = data->realms_;
int index = data->realm_count_;
- data->realms_ = new Persistent<Context>[++data->realm_count_];
+ data->realms_ = new Global<Context>[++data->realm_count_];
for (int i = 0; i < index; ++i) {
data->realms_[i].Reset(isolate, old_realms[i]);
old_realms[i].Reset();
}
delete[] old_realms;
- Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
+ Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
Local<Context> context = Context::New(isolate, NULL, global_template);
if (context.IsEmpty()) {
DCHECK(try_catch.HasCaught());
@@ -547,13 +545,20 @@ void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Invalid argument");
return;
}
- ScriptCompiler::Source script_source(args[1]->ToString(isolate));
- Handle<UnboundScript> script = ScriptCompiler::CompileUnbound(
- isolate, &script_source);
- if (script.IsEmpty()) return;
+ ScriptCompiler::Source script_source(
+ args[1]->ToString(isolate->GetCurrentContext()).ToLocalChecked());
+ Local<UnboundScript> script;
+ if (!ScriptCompiler::CompileUnboundScript(isolate, &script_source)
+ .ToLocal(&script)) {
+ return;
+ }
Local<Context> realm = Local<Context>::New(isolate, data->realms_[index]);
realm->Enter();
- Handle<Value> result = script->BindToCurrentContext()->Run();
+ Local<Value> result;
+ if (!script->BindToCurrentContext()->Run(realm).ToLocal(&result)) {
+ realm->Exit();
+ return;
+ }
realm->Exit();
args.GetReturnValue().Set(result);
}
@@ -593,8 +598,10 @@ void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Explicitly catch potential exceptions in toString().
v8::TryCatch try_catch(args.GetIsolate());
- Handle<String> str_obj = args[i]->ToString(args.GetIsolate());
- if (try_catch.HasCaught()) {
+ Local<String> str_obj;
+ if (!args[i]
+ ->ToString(args.GetIsolate()->GetCurrentContext())
+ .ToLocal(&str_obj)) {
try_catch.ReThrow();
return;
}
@@ -615,7 +622,7 @@ void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Error loading file");
return;
}
- Handle<String> source = ReadFile(args.GetIsolate(), *file);
+ Local<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
Throw(args.GetIsolate(), "Error loading file");
return;
@@ -624,10 +631,11 @@ void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
-Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
+Local<String> Shell::ReadFromStdin(Isolate* isolate) {
static const int kBufferSize = 256;
char buffer[kBufferSize];
- Handle<String> accumulator = String::NewFromUtf8(isolate, "");
+ Local<String> accumulator =
+ String::NewFromUtf8(isolate, "", NewStringType::kNormal).ToLocalChecked();
int length;
while (true) {
// Continue reading if the line ends with an escape '\\' or the line has
@@ -635,23 +643,26 @@ Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
// If fgets gets an error, just give up.
char* input = NULL;
input = fgets(buffer, kBufferSize, stdin);
- if (input == NULL) return Handle<String>();
+ if (input == NULL) return Local<String>();
length = static_cast<int>(strlen(buffer));
if (length == 0) {
return accumulator;
} else if (buffer[length-1] != '\n') {
accumulator = String::Concat(
accumulator,
- String::NewFromUtf8(isolate, buffer, String::kNormalString, length));
+ String::NewFromUtf8(isolate, buffer, NewStringType::kNormal, length)
+ .ToLocalChecked());
} else if (length > 1 && buffer[length-2] == '\\') {
buffer[length-2] = '\n';
accumulator = String::Concat(
- accumulator, String::NewFromUtf8(isolate, buffer,
- String::kNormalString, length - 1));
+ accumulator,
+ String::NewFromUtf8(isolate, buffer, NewStringType::kNormal,
+ length - 1).ToLocalChecked());
} else {
return String::Concat(
- accumulator, String::NewFromUtf8(isolate, buffer,
- String::kNormalString, length - 1));
+ accumulator,
+ String::NewFromUtf8(isolate, buffer, NewStringType::kNormal,
+ length - 1).ToLocalChecked());
}
}
}
@@ -665,16 +676,16 @@ void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Error loading file");
return;
}
- Handle<String> source = ReadFile(args.GetIsolate(), *file);
+ Local<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
Throw(args.GetIsolate(), "Error loading file");
return;
}
- if (!ExecuteString(args.GetIsolate(),
- source,
- String::NewFromUtf8(args.GetIsolate(), *file),
- false,
- true)) {
+ if (!ExecuteString(
+ args.GetIsolate(), source,
+ String::NewFromUtf8(args.GetIsolate(), *file,
+ NewStringType::kNormal).ToLocalChecked(),
+ false, true)) {
Throw(args.GetIsolate(), "Error executing file");
return;
}
@@ -691,12 +702,27 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
+ if (!args.IsConstructCall()) {
+ Throw(args.GetIsolate(), "Worker must be constructed with new");
+ return;
+ }
+
{
- base::LockGuard<base::Mutex> lock_guard(&workers_mutex_);
+ base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
+ if (workers_.length() >= kMaxWorkers) {
+ Throw(args.GetIsolate(), "Too many workers, I won't let you create more");
+ return;
+ }
+
+ // Initialize the internal field to NULL; if we return early without
+ // creating a new Worker (because the main thread is terminating) we can
+ // early-out from the instance calls.
+ args.Holder()->SetAlignedPointerInInternalField(0, NULL);
+
if (!allow_new_workers_) return;
Worker* worker = new Worker;
- args.This()->SetInternalField(0, External::New(isolate, worker));
+ args.Holder()->SetAlignedPointerInInternalField(0, worker);
workers_.Add(worker);
String::Utf8Value script(args[0]);
@@ -704,7 +730,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Can't get worker script");
return;
}
- worker->StartExecuteInThread(isolate, *script);
+ worker->StartExecuteInThread(*script);
}
}
@@ -719,16 +745,12 @@ void Shell::WorkerPostMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- Local<Value> this_value = args.This()->GetInternalField(0);
- if (!this_value->IsExternal()) {
- Throw(isolate, "this is not a Worker");
+ Worker* worker = GetWorkerFromInternalField(isolate, args.Holder());
+ if (!worker) {
return;
}
- Worker* worker =
- static_cast<Worker*>(Local<External>::Cast(this_value)->Value());
-
- Handle<Value> message = args[0];
+ Local<Value> message = args[0];
ObjectList to_transfer;
if (args.Length() >= 2) {
if (!args[1]->IsArray()) {
@@ -736,10 +758,10 @@ void Shell::WorkerPostMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- Handle<Array> transfer = Handle<Array>::Cast(args[1]);
+ Local<Array> transfer = Local<Array>::Cast(args[1]);
uint32_t length = transfer->Length();
for (uint32_t i = 0; i < length; ++i) {
- Handle<Value> element;
+ Local<Value> element;
if (transfer->Get(context, i).ToLocal(&element)) {
if (!element->IsArrayBuffer() && !element->IsSharedArrayBuffer()) {
Throw(isolate,
@@ -748,7 +770,7 @@ void Shell::WorkerPostMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
break;
}
- to_transfer.Add(Handle<Object>::Cast(element));
+ to_transfer.Add(Local<Object>::Cast(element));
}
}
}
@@ -766,16 +788,11 @@ void Shell::WorkerPostMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::WorkerGetMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
HandleScope handle_scope(isolate);
-
- Local<Value> this_value = args.This()->GetInternalField(0);
- if (!this_value->IsExternal()) {
- Throw(isolate, "this is not a Worker");
+ Worker* worker = GetWorkerFromInternalField(isolate, args.Holder());
+ if (!worker) {
return;
}
- Worker* worker =
- static_cast<Worker*>(Local<External>::Cast(this_value)->Value());
-
SerializationData* data = worker->GetMessage();
if (data) {
int offset = 0;
@@ -791,36 +808,45 @@ void Shell::WorkerGetMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::WorkerTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
HandleScope handle_scope(isolate);
- Local<Value> this_value = args.This()->GetInternalField(0);
- if (!this_value->IsExternal()) {
- Throw(isolate, "this is not a Worker");
+ Worker* worker = GetWorkerFromInternalField(isolate, args.Holder());
+ if (!worker) {
return;
}
- Worker* worker =
- static_cast<Worker*>(Local<External>::Cast(this_value)->Value());
worker->Terminate();
}
#endif // !V8_SHARED
+void Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args) {
+ int exit_code = (*args)[0]
+ ->Int32Value(args->GetIsolate()->GetCurrentContext())
+ .FromMaybe(0);
+#ifndef V8_SHARED
+ CleanupWorkers();
+#endif // !V8_SHARED
+ OnExit(args->GetIsolate());
+ Exit(exit_code);
+}
+
+
void Shell::Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
- int exit_code = args[0]->Int32Value();
- OnExit(args.GetIsolate());
- exit(exit_code);
+ base::CallOnce(&quit_once_, &QuitOnce,
+ const_cast<v8::FunctionCallbackInfo<v8::Value>*>(&args));
}
void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(
- String::NewFromUtf8(args.GetIsolate(), V8::GetVersion()));
+ String::NewFromUtf8(args.GetIsolate(), V8::GetVersion(),
+ NewStringType::kNormal).ToLocalChecked());
}
void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
HandleScope handle_scope(isolate);
#ifndef V8_SHARED
- Handle<Context> utility_context;
+ Local<Context> utility_context;
bool enter_context = !isolate->InContext();
if (enter_context) {
utility_context = Local<Context>::New(isolate, utility_context_);
@@ -829,7 +855,7 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
#endif // !V8_SHARED
v8::String::Utf8Value exception(try_catch->Exception());
const char* exception_string = ToCString(exception);
- Handle<Message> message = try_catch->Message();
+ Local<Message> message = try_catch->Message();
if (message.IsEmpty()) {
// V8 didn't provide any extra information about this error; just
// print the exception.
@@ -838,26 +864,32 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
// Print (filename):(line number): (message).
v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
const char* filename_string = ToCString(filename);
- int linenum = message->GetLineNumber();
+ int linenum =
+ message->GetLineNumber(isolate->GetCurrentContext()).FromJust();
printf("%s:%i: %s\n", filename_string, linenum, exception_string);
// Print line of source code.
- v8::String::Utf8Value sourceline(message->GetSourceLine());
+ v8::String::Utf8Value sourceline(
+ message->GetSourceLine(isolate->GetCurrentContext()).ToLocalChecked());
const char* sourceline_string = ToCString(sourceline);
printf("%s\n", sourceline_string);
// Print wavy underline (GetUnderline is deprecated).
- int start = message->GetStartColumn();
+ int start =
+ message->GetStartColumn(isolate->GetCurrentContext()).FromJust();
for (int i = 0; i < start; i++) {
printf(" ");
}
- int end = message->GetEndColumn();
+ int end = message->GetEndColumn(isolate->GetCurrentContext()).FromJust();
for (int i = start; i < end; i++) {
printf("^");
}
printf("\n");
- v8::String::Utf8Value stack_trace(try_catch->StackTrace());
- if (stack_trace.length() > 0) {
- const char* stack_trace_string = ToCString(stack_trace);
- printf("%s\n", stack_trace_string);
+ Local<Value> stack_trace_string;
+ if (try_catch->StackTrace(isolate->GetCurrentContext())
+ .ToLocal(&stack_trace_string) &&
+ stack_trace_string->IsString()) {
+ v8::String::Utf8Value stack_trace(
+ Local<String>::Cast(stack_trace_string));
+ printf("%s\n", ToCString(stack_trace));
}
}
printf("\n");
@@ -868,57 +900,6 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
#ifndef V8_SHARED
-Handle<Array> Shell::GetCompletions(Isolate* isolate,
- Handle<String> text,
- Handle<String> full) {
- EscapableHandleScope handle_scope(isolate);
- v8::Local<v8::Context> utility_context =
- v8::Local<v8::Context>::New(isolate, utility_context_);
- v8::Context::Scope context_scope(utility_context);
- Handle<Object> global = utility_context->Global();
- Local<Value> fun =
- global->Get(String::NewFromUtf8(isolate, "GetCompletions"));
- static const int kArgc = 3;
- v8::Local<v8::Context> evaluation_context =
- v8::Local<v8::Context>::New(isolate, evaluation_context_);
- Handle<Value> argv[kArgc] = { evaluation_context->Global(), text, full };
- Local<Value> val = Local<Function>::Cast(fun)->Call(global, kArgc, argv);
- return handle_scope.Escape(Local<Array>::Cast(val));
-}
-
-
-Local<Object> Shell::DebugMessageDetails(Isolate* isolate,
- Handle<String> message) {
- EscapableHandleScope handle_scope(isolate);
- v8::Local<v8::Context> context =
- v8::Local<v8::Context>::New(isolate, utility_context_);
- v8::Context::Scope context_scope(context);
- Handle<Object> global = context->Global();
- Handle<Value> fun =
- global->Get(String::NewFromUtf8(isolate, "DebugMessageDetails"));
- static const int kArgc = 1;
- Handle<Value> argv[kArgc] = { message };
- Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return handle_scope.Escape(Local<Object>(Handle<Object>::Cast(val)));
-}
-
-
-Local<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
- Handle<String> command) {
- EscapableHandleScope handle_scope(isolate);
- v8::Local<v8::Context> context =
- v8::Local<v8::Context>::New(isolate, utility_context_);
- v8::Context::Scope context_scope(context);
- Handle<Object> global = context->Global();
- Handle<Value> fun =
- global->Get(String::NewFromUtf8(isolate, "DebugCommandToJSONRequest"));
- static const int kArgc = 1;
- Handle<Value> argv[kArgc] = { command };
- Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
- return handle_scope.Escape(Local<Value>(val));
-}
-
-
int32_t* Counter::Bind(const char* name, bool is_histogram) {
int i;
for (i = 0; i < kMaxNameSize - 1 && name[i]; i++)
@@ -1034,6 +1015,8 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
HandleScope scope(isolate);
// If we use the utility context, we have to set the security tokens so that
// utility, evaluation and debug context can all access each other.
+ Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
+ utility_context_.Reset(isolate, Context::New(isolate, NULL, global_template));
v8::Local<v8::Context> utility_context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Local<v8::Context> evaluation_context =
@@ -1042,33 +1025,23 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
evaluation_context->SetSecurityToken(Undefined(isolate));
v8::Context::Scope context_scope(utility_context);
- if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
- // Install the debugger object in the utility scope
- i::Debug* debug = reinterpret_cast<i::Isolate*>(isolate)->debug();
- debug->Load();
- i::Handle<i::Context> debug_context = debug->debug_context();
- i::Handle<i::JSObject> js_debug
- = i::Handle<i::JSObject>(debug_context->global_object());
- utility_context->Global()->Set(String::NewFromUtf8(isolate, "$debug"),
- Utils::ToLocal(js_debug));
- debug_context->set_security_token(
- reinterpret_cast<i::Isolate*>(isolate)->heap()->undefined_value());
-
// Run the d8 shell utility script in the utility context
int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
i::Vector<const char> shell_source =
i::NativesCollection<i::D8>::GetScriptSource(source_index);
i::Vector<const char> shell_source_name =
i::NativesCollection<i::D8>::GetScriptName(source_index);
- Handle<String> source =
- String::NewFromUtf8(isolate, shell_source.start(), String::kNormalString,
- shell_source.length());
- Handle<String> name =
+ Local<String> source =
+ String::NewFromUtf8(isolate, shell_source.start(), NewStringType::kNormal,
+ shell_source.length()).ToLocalChecked();
+ Local<String> name =
String::NewFromUtf8(isolate, shell_source_name.start(),
- String::kNormalString, shell_source_name.length());
+ NewStringType::kNormal,
+ shell_source_name.length()).ToLocalChecked();
ScriptOrigin origin(name);
- Handle<Script> script = Script::Compile(source, &origin);
- script->Run();
+ Local<Script> script =
+ Script::Compile(utility_context, source, &origin).ToLocalChecked();
+ script->Run(utility_context).ToLocalChecked();
// Mark the d8 shell script as native to avoid it showing up as normal source
// in the debugger.
i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
@@ -1078,83 +1051,141 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
: i::Handle<i::Script>(i::Script::cast(
i::SharedFunctionInfo::cast(*compiled_script)->script()));
script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
-
- // Start the in-process debugger if requested.
- if (i::FLAG_debugger) v8::Debug::SetDebugEventListener(HandleDebugEvent);
}
#endif // !V8_SHARED
-Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
- Handle<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
- global_template->Set(String::NewFromUtf8(isolate, "print"),
- FunctionTemplate::New(isolate, Print));
- global_template->Set(String::NewFromUtf8(isolate, "write"),
- FunctionTemplate::New(isolate, Write));
- global_template->Set(String::NewFromUtf8(isolate, "read"),
- FunctionTemplate::New(isolate, Read));
- global_template->Set(String::NewFromUtf8(isolate, "readbuffer"),
- FunctionTemplate::New(isolate, ReadBuffer));
- global_template->Set(String::NewFromUtf8(isolate, "readline"),
- FunctionTemplate::New(isolate, ReadLine));
- global_template->Set(String::NewFromUtf8(isolate, "load"),
- FunctionTemplate::New(isolate, Load));
+Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
+ Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
+ global_template->Set(
+ String::NewFromUtf8(isolate, "print", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, Print));
+ global_template->Set(
+ String::NewFromUtf8(isolate, "write", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, Write));
+ global_template->Set(
+ String::NewFromUtf8(isolate, "read", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, Read));
+ global_template->Set(
+ String::NewFromUtf8(isolate, "readbuffer", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, ReadBuffer));
+ global_template->Set(
+ String::NewFromUtf8(isolate, "readline", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, ReadLine));
+ global_template->Set(
+ String::NewFromUtf8(isolate, "load", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, Load));
// Some Emscripten-generated code tries to call 'quit', which in turn would
// call C's exit(). This would lead to memory leaks, because there is no way
// we can terminate cleanly then, so we need a way to hide 'quit'.
if (!options.omit_quit) {
- global_template->Set(String::NewFromUtf8(isolate, "quit"),
- FunctionTemplate::New(isolate, Quit));
+ global_template->Set(
+ String::NewFromUtf8(isolate, "quit", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, Quit));
}
- global_template->Set(String::NewFromUtf8(isolate, "version"),
- FunctionTemplate::New(isolate, Version));
+ global_template->Set(
+ String::NewFromUtf8(isolate, "version", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, Version));
// Bind the Realm object.
- Handle<ObjectTemplate> realm_template = ObjectTemplate::New(isolate);
- realm_template->Set(String::NewFromUtf8(isolate, "current"),
- FunctionTemplate::New(isolate, RealmCurrent));
- realm_template->Set(String::NewFromUtf8(isolate, "owner"),
- FunctionTemplate::New(isolate, RealmOwner));
- realm_template->Set(String::NewFromUtf8(isolate, "global"),
- FunctionTemplate::New(isolate, RealmGlobal));
- realm_template->Set(String::NewFromUtf8(isolate, "create"),
- FunctionTemplate::New(isolate, RealmCreate));
- realm_template->Set(String::NewFromUtf8(isolate, "dispose"),
- FunctionTemplate::New(isolate, RealmDispose));
- realm_template->Set(String::NewFromUtf8(isolate, "switch"),
- FunctionTemplate::New(isolate, RealmSwitch));
- realm_template->Set(String::NewFromUtf8(isolate, "eval"),
- FunctionTemplate::New(isolate, RealmEval));
- realm_template->SetAccessor(String::NewFromUtf8(isolate, "shared"),
- RealmSharedGet, RealmSharedSet);
- global_template->Set(String::NewFromUtf8(isolate, "Realm"), realm_template);
+ Local<ObjectTemplate> realm_template = ObjectTemplate::New(isolate);
+ realm_template->Set(
+ String::NewFromUtf8(isolate, "current", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, RealmCurrent));
+ realm_template->Set(
+ String::NewFromUtf8(isolate, "owner", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, RealmOwner));
+ realm_template->Set(
+ String::NewFromUtf8(isolate, "global", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, RealmGlobal));
+ realm_template->Set(
+ String::NewFromUtf8(isolate, "create", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, RealmCreate));
+ realm_template->Set(
+ String::NewFromUtf8(isolate, "dispose", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, RealmDispose));
+ realm_template->Set(
+ String::NewFromUtf8(isolate, "switch", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, RealmSwitch));
+ realm_template->Set(
+ String::NewFromUtf8(isolate, "eval", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, RealmEval));
+ realm_template->SetAccessor(
+ String::NewFromUtf8(isolate, "shared", NewStringType::kNormal)
+ .ToLocalChecked(),
+ RealmSharedGet, RealmSharedSet);
+ global_template->Set(
+ String::NewFromUtf8(isolate, "Realm", NewStringType::kNormal)
+ .ToLocalChecked(),
+ realm_template);
#ifndef V8_SHARED
- Handle<ObjectTemplate> performance_template = ObjectTemplate::New(isolate);
- performance_template->Set(String::NewFromUtf8(isolate, "now"),
- FunctionTemplate::New(isolate, PerformanceNow));
- global_template->Set(String::NewFromUtf8(isolate, "performance"),
- performance_template);
-
- Handle<FunctionTemplate> worker_fun_template =
+ Local<ObjectTemplate> performance_template = ObjectTemplate::New(isolate);
+ performance_template->Set(
+ String::NewFromUtf8(isolate, "now", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, PerformanceNow));
+ global_template->Set(
+ String::NewFromUtf8(isolate, "performance", NewStringType::kNormal)
+ .ToLocalChecked(),
+ performance_template);
+
+ Local<FunctionTemplate> worker_fun_template =
FunctionTemplate::New(isolate, WorkerNew);
+ Local<Signature> worker_signature =
+ Signature::New(isolate, worker_fun_template);
+ worker_fun_template->SetClassName(
+ String::NewFromUtf8(isolate, "Worker", NewStringType::kNormal)
+ .ToLocalChecked());
+ worker_fun_template->ReadOnlyPrototype();
worker_fun_template->PrototypeTemplate()->Set(
- String::NewFromUtf8(isolate, "terminate"),
- FunctionTemplate::New(isolate, WorkerTerminate));
+ String::NewFromUtf8(isolate, "terminate", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, WorkerTerminate, Local<Value>(),
+ worker_signature));
worker_fun_template->PrototypeTemplate()->Set(
- String::NewFromUtf8(isolate, "postMessage"),
- FunctionTemplate::New(isolate, WorkerPostMessage));
+ String::NewFromUtf8(isolate, "postMessage", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, WorkerPostMessage, Local<Value>(),
+ worker_signature));
worker_fun_template->PrototypeTemplate()->Set(
- String::NewFromUtf8(isolate, "getMessage"),
- FunctionTemplate::New(isolate, WorkerGetMessage));
+ String::NewFromUtf8(isolate, "getMessage", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, WorkerGetMessage, Local<Value>(),
+ worker_signature));
worker_fun_template->InstanceTemplate()->SetInternalFieldCount(1);
- global_template->Set(String::NewFromUtf8(isolate, "Worker"),
- worker_fun_template);
+ global_template->Set(
+ String::NewFromUtf8(isolate, "Worker", NewStringType::kNormal)
+ .ToLocalChecked(),
+ worker_fun_template);
#endif // !V8_SHARED
- Handle<ObjectTemplate> os_templ = ObjectTemplate::New(isolate);
+ Local<ObjectTemplate> os_templ = ObjectTemplate::New(isolate);
AddOSMethods(isolate, os_templ);
- global_template->Set(String::NewFromUtf8(isolate, "os"), os_templ);
+ global_template->Set(
+ String::NewFromUtf8(isolate, "os", NewStringType::kNormal)
+ .ToLocalChecked(),
+ os_templ);
+
+#if defined(V8_WASM)
+ // Install WASM API.
+ WasmJs::Install(isolate, global_template);
+#endif
return global_template;
}
@@ -1169,28 +1200,13 @@ void Shell::Initialize(Isolate* isolate) {
}
-void Shell::InitializeDebugger(Isolate* isolate) {
- if (options.test_shell) return;
-#ifndef V8_SHARED
- HandleScope scope(isolate);
- Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
- utility_context_.Reset(isolate,
- Context::New(isolate, NULL, global_template));
- if (utility_context_.IsEmpty()) {
- printf("Failed to initialize debugger\n");
- Shell::Exit(1);
- }
-#endif // !V8_SHARED
-}
-
-
Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
- base::LockGuard<base::Mutex> lock_guard(&context_mutex_);
+ base::LockGuard<base::Mutex> lock_guard(context_mutex_.Pointer());
#endif // !V8_SHARED
// Initialize the global objects
- Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
+ Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
EscapableHandleScope handle_scope(isolate);
Local<Context> context = Context::New(isolate, NULL, global_template);
DCHECK(!context.IsEmpty());
@@ -1208,8 +1224,12 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
}
i::Handle<i::JSArray> arguments_jsarray =
factory->NewJSArrayWithElements(arguments_array);
- context->Global()->Set(String::NewFromUtf8(isolate, "arguments"),
- Utils::ToLocal(arguments_jsarray));
+ context->Global()
+ ->Set(context,
+ String::NewFromUtf8(isolate, "arguments", NewStringType::kNormal)
+ .ToLocalChecked(),
+ Utils::ToLocal(arguments_jsarray))
+ .FromJust();
#endif // !V8_SHARED
return handle_scope.Escape(context);
}
@@ -1238,8 +1258,6 @@ inline bool operator<(const CounterAndKey& lhs, const CounterAndKey& rhs) {
void Shell::OnExit(v8::Isolate* isolate) {
- LineEditor* line_editor = LineEditor::Get();
- if (line_editor) line_editor->Close();
#ifndef V8_SHARED
reinterpret_cast<i::Isolate*>(isolate)->DumpAndResetCompilationStats();
if (i::FLAG_dump_counters) {
@@ -1364,8 +1382,7 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
data->byte_length = length;
- Handle<v8::ArrayBuffer> buffer =
- ArrayBuffer::New(isolate, data->data, length);
+ Local<v8::ArrayBuffer> buffer = ArrayBuffer::New(isolate, data->data, length);
data->handle.Reset(isolate, buffer);
data->handle.SetWeak(data, ReadBufferWeakCallback,
v8::WeakCallbackType::kParameter);
@@ -1377,12 +1394,13 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Reads a file into a v8 string.
-Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
+Local<String> Shell::ReadFile(Isolate* isolate, const char* name) {
int size = 0;
char* chars = ReadChars(isolate, name, &size);
- if (chars == NULL) return Handle<String>();
- Handle<String> result =
- String::NewFromUtf8(isolate, chars, String::kNormalString, size);
+ if (chars == NULL) return Local<String>();
+ Local<String> result =
+ String::NewFromUtf8(isolate, chars, NewStringType::kNormal, size)
+ .ToLocalChecked();
delete[] chars;
return result;
}
@@ -1394,13 +1412,19 @@ void Shell::RunShell(Isolate* isolate) {
v8::Local<v8::Context>::New(isolate, evaluation_context_);
v8::Context::Scope context_scope(context);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
- Handle<String> name = String::NewFromUtf8(isolate, "(d8)");
- LineEditor* console = LineEditor::Get();
- printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
- console->Open(isolate);
+ Local<String> name =
+ String::NewFromUtf8(isolate, "(d8)", NewStringType::kNormal)
+ .ToLocalChecked();
+ printf("V8 version %s\n", V8::GetVersion());
while (true) {
HandleScope inner_scope(isolate);
- Handle<String> input = console->Prompt(Shell::kPrompt);
+ printf("d8> ");
+#if defined(__native_client__)
+ // Native Client libc is used to being embedded in Chrome and
+ // has trouble recognizing when to flush.
+ fflush(stdout);
+#endif
+ Local<String> input = Shell::ReadFromStdin(isolate);
if (input.IsEmpty()) break;
ExecuteString(isolate, input, name, true, true);
}
@@ -1424,8 +1448,13 @@ void SourceGroup::Execute(Isolate* isolate) {
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
// Execute argument given to -e option directly.
HandleScope handle_scope(isolate);
- Handle<String> file_name = String::NewFromUtf8(isolate, "unnamed");
- Handle<String> source = String::NewFromUtf8(isolate, argv_[i + 1]);
+ Local<String> file_name =
+ String::NewFromUtf8(isolate, "unnamed", NewStringType::kNormal)
+ .ToLocalChecked();
+ Local<String> source =
+ String::NewFromUtf8(isolate, argv_[i + 1], NewStringType::kNormal)
+ .ToLocalChecked();
+ Shell::options.script_executed = true;
if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
exception_was_thrown = true;
break;
@@ -1443,12 +1472,15 @@ void SourceGroup::Execute(Isolate* isolate) {
// Use all other arguments as names of files to load and run.
HandleScope handle_scope(isolate);
- Handle<String> file_name = String::NewFromUtf8(isolate, arg);
- Handle<String> source = ReadFile(isolate, arg);
+ Local<String> file_name =
+ String::NewFromUtf8(isolate, arg, NewStringType::kNormal)
+ .ToLocalChecked();
+ Local<String> source = ReadFile(isolate, arg);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", arg);
Shell::Exit(1);
}
+ Shell::options.script_executed = true;
if (!Shell::ExecuteString(isolate, source, file_name, false, true,
source_type)) {
exception_was_thrown = true;
@@ -1461,12 +1493,13 @@ void SourceGroup::Execute(Isolate* isolate) {
}
-Handle<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
+Local<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
int size;
char* chars = ReadChars(isolate, name, &size);
- if (chars == NULL) return Handle<String>();
- Handle<String> result =
- String::NewFromUtf8(isolate, chars, String::kNormalString, size);
+ if (chars == NULL) return Local<String>();
+ Local<String> result =
+ String::NewFromUtf8(isolate, chars, NewStringType::kNormal, size)
+ .ToLocalChecked();
delete[] chars;
return result;
}
@@ -1486,7 +1519,7 @@ void SourceGroup::ExecuteInThread() {
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
Isolate* isolate = Isolate::New(create_params);
- do {
+ for (int i = 0; i < Shell::options.stress_runs; ++i) {
next_semaphore_.Wait();
{
Isolate::Scope iscope(isolate);
@@ -1503,7 +1536,7 @@ void SourceGroup::ExecuteInThread() {
Shell::CollectGarbage(isolate);
}
done_semaphore_.Signal();
- } while (!Shell::options.last_run);
+ }
isolate->Dispose();
}
@@ -1520,31 +1553,37 @@ void SourceGroup::StartExecuteInThread() {
void SourceGroup::WaitForThread() {
if (thread_ == NULL) return;
- if (Shell::options.last_run) {
- thread_->Join();
- } else {
- done_semaphore_.Wait();
- }
+ done_semaphore_.Wait();
+}
+
+
+void SourceGroup::JoinThread() {
+ if (thread_ == NULL) return;
+ thread_->Join();
}
SerializationData::~SerializationData() {
- // Any ArrayBuffer::Contents are owned by this SerializationData object.
- // SharedArrayBuffer::Contents may be used by other threads, so must be
+ // Any ArrayBuffer::Contents are owned by this SerializationData object if
+ // ownership hasn't been transferred out via ReadArrayBufferContents.
+ // SharedArrayBuffer::Contents may be used by multiple threads, so must be
// cleaned up by the main thread in Shell::CleanupWorkers().
- for (int i = 0; i < array_buffer_contents.length(); ++i) {
- ArrayBuffer::Contents& contents = array_buffer_contents[i];
- Shell::array_buffer_allocator->Free(contents.Data(), contents.ByteLength());
+ for (int i = 0; i < array_buffer_contents_.length(); ++i) {
+ ArrayBuffer::Contents& contents = array_buffer_contents_[i];
+ if (contents.Data()) {
+ Shell::array_buffer_allocator->Free(contents.Data(),
+ contents.ByteLength());
+ }
}
}
-void SerializationData::WriteTag(SerializationTag tag) { data.Add(tag); }
+void SerializationData::WriteTag(SerializationTag tag) { data_.Add(tag); }
void SerializationData::WriteMemory(const void* p, int length) {
if (length > 0) {
- i::Vector<uint8_t> block = data.AddBlock(0, length);
+ i::Vector<uint8_t> block = data_.AddBlock(0, length);
memcpy(&block[0], p, length);
}
}
@@ -1552,18 +1591,18 @@ void SerializationData::WriteMemory(const void* p, int length) {
void SerializationData::WriteArrayBufferContents(
const ArrayBuffer::Contents& contents) {
- array_buffer_contents.Add(contents);
+ array_buffer_contents_.Add(contents);
WriteTag(kSerializationTagTransferredArrayBuffer);
- int index = array_buffer_contents.length() - 1;
+ int index = array_buffer_contents_.length() - 1;
Write(index);
}
void SerializationData::WriteSharedArrayBufferContents(
const SharedArrayBuffer::Contents& contents) {
- shared_array_buffer_contents.Add(contents);
+ shared_array_buffer_contents_.Add(contents);
WriteTag(kSerializationTagTransferredSharedArrayBuffer);
- int index = shared_array_buffer_contents.length() - 1;
+ int index = shared_array_buffer_contents_.length() - 1;
Write(index);
}
@@ -1575,7 +1614,7 @@ SerializationTag SerializationData::ReadTag(int* offset) const {
void SerializationData::ReadMemory(void* p, int length, int* offset) const {
if (length > 0) {
- memcpy(p, &data[*offset], length);
+ memcpy(p, &data_[*offset], length);
(*offset) += length;
}
}
@@ -1584,16 +1623,20 @@ void SerializationData::ReadMemory(void* p, int length, int* offset) const {
void SerializationData::ReadArrayBufferContents(ArrayBuffer::Contents* contents,
int* offset) const {
int index = Read<int>(offset);
- DCHECK(index < array_buffer_contents.length());
- *contents = array_buffer_contents[index];
+ DCHECK(index < array_buffer_contents_.length());
+ *contents = array_buffer_contents_[index];
+ // Ownership of this ArrayBuffer::Contents is passed to the caller. Neuter
+ // our copy so it won't be double-free'd when this SerializationData is
+ // destroyed.
+ array_buffer_contents_[index] = ArrayBuffer::Contents();
}
void SerializationData::ReadSharedArrayBufferContents(
SharedArrayBuffer::Contents* contents, int* offset) const {
int index = Read<int>(offset);
- DCHECK(index < shared_array_buffer_contents.length());
- *contents = shared_array_buffer_contents[index];
+ DCHECK(index < shared_array_buffer_contents_.length());
+ *contents = shared_array_buffer_contents_[index];
}
@@ -1605,6 +1648,7 @@ void SerializationDataQueue::Enqueue(SerializationData* data) {
bool SerializationDataQueue::Dequeue(SerializationData** data) {
base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ *data = NULL;
if (data_.is_empty()) return false;
*data = data_.Remove(0);
return true;
@@ -1631,21 +1675,24 @@ Worker::Worker()
out_semaphore_(0),
thread_(NULL),
script_(NULL),
- state_(IDLE) {}
+ running_(false) {}
-Worker::~Worker() { Cleanup(); }
+Worker::~Worker() {
+ delete thread_;
+ thread_ = NULL;
+ delete[] script_;
+ script_ = NULL;
+ in_queue_.Clear();
+ out_queue_.Clear();
+}
-void Worker::StartExecuteInThread(Isolate* isolate, const char* script) {
- if (base::NoBarrier_CompareAndSwap(&state_, IDLE, RUNNING) == IDLE) {
- script_ = i::StrDup(script);
- thread_ = new WorkerThread(this);
- thread_->Start();
- } else {
- // Somehow the Worker was started twice.
- UNREACHABLE();
- }
+void Worker::StartExecuteInThread(const char* script) {
+ running_ = true;
+ script_ = i::StrDup(script);
+ thread_ = new WorkerThread(this);
+ thread_->Start();
}
@@ -1658,20 +1705,26 @@ void Worker::PostMessage(SerializationData* data) {
SerializationData* Worker::GetMessage() {
SerializationData* data = NULL;
while (!out_queue_.Dequeue(&data)) {
- if (base::NoBarrier_Load(&state_) != RUNNING) break;
+ // If the worker is no longer running, and there are no messages in the
+ // queue, don't expect any more messages from it.
+ if (!base::NoBarrier_Load(&running_)) break;
out_semaphore_.Wait();
}
-
return data;
}
void Worker::Terminate() {
- if (base::NoBarrier_CompareAndSwap(&state_, RUNNING, TERMINATED) == RUNNING) {
- // Post NULL to wake the Worker thread message loop.
- PostMessage(NULL);
- thread_->Join();
- }
+ base::NoBarrier_Store(&running_, false);
+ // Post NULL to wake the Worker thread message loop, and tell it to stop
+ // running.
+ PostMessage(NULL);
+}
+
+
+void Worker::WaitForThread() {
+ Terminate();
+ thread_->Join();
}
@@ -1689,42 +1742,48 @@ void Worker::ExecuteInThread() {
Context::Scope cscope(context);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
- Handle<Object> global = context->Global();
- Handle<Value> this_value = External::New(isolate, this);
- Handle<FunctionTemplate> postmessage_fun_template =
+ Local<Object> global = context->Global();
+ Local<Value> this_value = External::New(isolate, this);
+ Local<FunctionTemplate> postmessage_fun_template =
FunctionTemplate::New(isolate, PostMessageOut, this_value);
- Handle<Function> postmessage_fun;
+ Local<Function> postmessage_fun;
if (postmessage_fun_template->GetFunction(context)
.ToLocal(&postmessage_fun)) {
- global->Set(String::NewFromUtf8(isolate, "postMessage"),
- postmessage_fun);
+ global->Set(context, String::NewFromUtf8(isolate, "postMessage",
+ NewStringType::kNormal)
+ .ToLocalChecked(),
+ postmessage_fun).FromJust();
}
// First run the script
- Handle<String> file_name = String::NewFromUtf8(isolate, "unnamed");
- Handle<String> source = String::NewFromUtf8(isolate, script_);
+ Local<String> file_name =
+ String::NewFromUtf8(isolate, "unnamed", NewStringType::kNormal)
+ .ToLocalChecked();
+ Local<String> source =
+ String::NewFromUtf8(isolate, script_, NewStringType::kNormal)
+ .ToLocalChecked();
if (Shell::ExecuteString(isolate, source, file_name, false, true)) {
// Get the message handler
- Handle<Value> onmessage =
- global->Get(String::NewFromUtf8(isolate, "onmessage"));
+ Local<Value> onmessage =
+ global->Get(context, String::NewFromUtf8(isolate, "onmessage",
+ NewStringType::kNormal)
+ .ToLocalChecked()).ToLocalChecked();
if (onmessage->IsFunction()) {
- Handle<Function> onmessage_fun = Handle<Function>::Cast(onmessage);
+ Local<Function> onmessage_fun = Local<Function>::Cast(onmessage);
// Now wait for messages
- bool done = false;
- while (!done) {
+ while (true) {
in_semaphore_.Wait();
SerializationData* data;
if (!in_queue_.Dequeue(&data)) continue;
if (data == NULL) {
- done = true;
break;
}
int offset = 0;
Local<Value> data_value;
if (Shell::DeserializeValue(isolate, *data, &offset)
.ToLocal(&data_value)) {
- Handle<Value> argv[] = {data_value};
+ Local<Value> argv[] = {data_value};
(void)onmessage_fun->Call(context, global, 1, argv);
}
delete data;
@@ -1737,21 +1796,9 @@ void Worker::ExecuteInThread() {
}
isolate->Dispose();
- if (base::NoBarrier_CompareAndSwap(&state_, RUNNING, TERMINATED) == RUNNING) {
- // Post NULL to wake the thread waiting on GetMessage() if there is one.
- out_queue_.Enqueue(NULL);
- out_semaphore_.Signal();
- }
-}
-
-
-void Worker::Cleanup() {
- delete thread_;
- thread_ = NULL;
- delete[] script_;
- script_ = NULL;
- in_queue_.Clear();
- out_queue_.Clear();
+ // Post NULL to wake the thread waiting on GetMessage() if there is one.
+ out_queue_.Enqueue(NULL);
+ out_semaphore_.Signal();
}
@@ -1764,7 +1811,7 @@ void Worker::PostMessageOut(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- Handle<Value> message = args[0];
+ Local<Value> message = args[0];
// TODO(binji): Allow transferring from worker to main thread?
Shell::ObjectList to_transfer;
@@ -1774,7 +1821,7 @@ void Worker::PostMessageOut(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (Shell::SerializeValue(isolate, message, to_transfer, &seen_objects,
data)) {
DCHECK(args.Data()->IsExternal());
- Handle<External> this_value = Handle<External>::Cast(args.Data());
+ Local<External> this_value = Local<External>::Cast(args.Data());
Worker* worker = static_cast<Worker*>(this_value->Value());
worker->out_queue_.Enqueue(data);
worker->out_semaphore_.Signal();
@@ -1861,9 +1908,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--dump-counters") == 0) {
printf("D8 with shared library does not include counters\n");
return false;
- } else if (strcmp(argv[i], "--debugger") == 0) {
- printf("Javascript debugger not included\n");
- return false;
#endif // V8_SHARED
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
} else if (strncmp(argv[i], "--natives_blob=", 15) == 0) {
@@ -1909,6 +1953,11 @@ bool Shell::SetOptions(int argc, char* argv[]) {
enable_harmony_modules = true;
} else if (strncmp(argv[i], "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", argv[i]);
+ } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
+ options.script_executed = true;
+ } else if (strncmp(str, "-", 1) != 0) {
+ // Not a flag, so it must be a script to execute.
+ options.script_executed = true;
}
}
current->End(argc);
@@ -1925,7 +1974,7 @@ bool Shell::SetOptions(int argc, char* argv[]) {
}
-int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
+int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
#ifndef V8_SHARED
for (int i = 1; i < options.num_isolates; ++i) {
options.isolate_sources[i].StartExecuteInThread();
@@ -1934,16 +1983,9 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
{
HandleScope scope(isolate);
Local<Context> context = CreateEvaluationContext(isolate);
- if (options.last_run && options.use_interactive_shell()) {
+ if (last_run && options.use_interactive_shell()) {
// Keep using the same context in the interactive shell.
evaluation_context_.Reset(isolate, context);
-#ifndef V8_SHARED
- // If the interactive debugger is enabled make sure to activate
- // it before running the files passed on the command line.
- if (i::FLAG_debugger) {
- InstallUtilityScript(isolate);
- }
-#endif // !V8_SHARED
}
{
Context::Scope cscope(context);
@@ -1954,7 +1996,11 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
CollectGarbage(isolate);
#ifndef V8_SHARED
for (int i = 1; i < options.num_isolates; ++i) {
- options.isolate_sources[i].WaitForThread();
+ if (last_run) {
+ options.isolate_sources[i].JoinThread();
+ } else {
+ options.isolate_sources[i].WaitForThread();
+ }
}
CleanupWorkers();
#endif // !V8_SHARED
@@ -1978,8 +2024,13 @@ void Shell::CollectGarbage(Isolate* isolate) {
}
+void Shell::EmptyMessageQueues(Isolate* isolate) {
+ while (v8::platform::PumpMessageLoop(g_platform, isolate)) continue;
+}
+
+
#ifndef V8_SHARED
-bool Shell::SerializeValue(Isolate* isolate, Handle<Value> value,
+bool Shell::SerializeValue(Isolate* isolate, Local<Value> value,
const ObjectList& to_transfer,
ObjectList* seen_objects,
SerializationData* out_data) {
@@ -1995,7 +2046,7 @@ bool Shell::SerializeValue(Isolate* isolate, Handle<Value> value,
} else if (value->IsFalse()) {
out_data->WriteTag(kSerializationTagFalse);
} else if (value->IsNumber()) {
- Handle<Number> num = Handle<Number>::Cast(value);
+ Local<Number> num = Local<Number>::Cast(value);
double value = num->Value();
out_data->WriteTag(kSerializationTagNumber);
out_data->Write(value);
@@ -2005,7 +2056,7 @@ bool Shell::SerializeValue(Isolate* isolate, Handle<Value> value,
out_data->Write(str.length());
out_data->WriteMemory(*str, str.length());
} else if (value->IsArray()) {
- Handle<Array> array = Handle<Array>::Cast(value);
+ Local<Array> array = Local<Array>::Cast(value);
if (FindInObjectList(array, *seen_objects)) {
Throw(isolate, "Duplicated arrays not supported");
return false;
@@ -2020,10 +2071,13 @@ bool Shell::SerializeValue(Isolate* isolate, Handle<Value> value,
if (!SerializeValue(isolate, element_value, to_transfer, seen_objects,
out_data))
return false;
+ } else {
+ Throw(isolate, "Failed to serialize array element.");
+ return false;
}
}
} else if (value->IsArrayBuffer()) {
- Handle<ArrayBuffer> array_buffer = Handle<ArrayBuffer>::Cast(value);
+ Local<ArrayBuffer> array_buffer = Local<ArrayBuffer>::Cast(value);
if (FindInObjectList(array_buffer, *seen_objects)) {
Throw(isolate, "Duplicated array buffers not supported");
return false;
@@ -2036,25 +2090,26 @@ bool Shell::SerializeValue(Isolate* isolate, Handle<Value> value,
return false;
}
- ArrayBuffer::Contents contents = array_buffer->Externalize();
+ ArrayBuffer::Contents contents = array_buffer->IsExternal()
+ ? array_buffer->GetContents()
+ : array_buffer->Externalize();
array_buffer->Neuter();
out_data->WriteArrayBufferContents(contents);
} else {
ArrayBuffer::Contents contents = array_buffer->GetContents();
// Clone ArrayBuffer
- if (contents.ByteLength() > i::kMaxUInt32) {
+ if (contents.ByteLength() > i::kMaxInt) {
Throw(isolate, "ArrayBuffer is too big to clone");
return false;
}
- int byte_length = static_cast<int>(contents.ByteLength());
+ int32_t byte_length = static_cast<int32_t>(contents.ByteLength());
out_data->WriteTag(kSerializationTagArrayBuffer);
out_data->Write(byte_length);
- out_data->WriteMemory(contents.Data(),
- static_cast<int>(contents.ByteLength()));
+ out_data->WriteMemory(contents.Data(), byte_length);
}
} else if (value->IsSharedArrayBuffer()) {
- Handle<SharedArrayBuffer> sab = Handle<SharedArrayBuffer>::Cast(value);
+ Local<SharedArrayBuffer> sab = Local<SharedArrayBuffer>::Cast(value);
if (FindInObjectList(sab, *seen_objects)) {
Throw(isolate, "Duplicated shared array buffers not supported");
return false;
@@ -2065,11 +2120,17 @@ bool Shell::SerializeValue(Isolate* isolate, Handle<Value> value,
return false;
}
- SharedArrayBuffer::Contents contents = sab->Externalize();
+ SharedArrayBuffer::Contents contents;
+ if (sab->IsExternal()) {
+ contents = sab->GetContents();
+ } else {
+ contents = sab->Externalize();
+ base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
+ externalized_shared_contents_.Add(contents);
+ }
out_data->WriteSharedArrayBufferContents(contents);
- externalized_shared_contents_.Add(contents);
} else if (value->IsObject()) {
- Handle<Object> object = Handle<Object>::Cast(value);
+ Local<Object> object = Local<Object>::Cast(value);
if (FindInObjectList(object, *seen_objects)) {
Throw(isolate, "Duplicated objects not supported");
return false;
@@ -2085,8 +2146,8 @@ bool Shell::SerializeValue(Isolate* isolate, Handle<Value> value,
out_data->WriteTag(kSerializationTagObject);
out_data->Write(length);
for (uint32_t i = 0; i < length; ++i) {
- Handle<Value> name;
- Handle<Value> property_value;
+ Local<Value> name;
+ Local<Value> property_value;
if (property_names->Get(context, i).ToLocal(&name) &&
object->Get(context, name).ToLocal(&property_value)) {
if (!SerializeValue(isolate, name, to_transfer, seen_objects, out_data))
@@ -2094,6 +2155,9 @@ bool Shell::SerializeValue(Isolate* isolate, Handle<Value> value,
if (!SerializeValue(isolate, property_value, to_transfer, seen_objects,
out_data))
return false;
+ } else {
+ Throw(isolate, "Failed to serialize property.");
+ return false;
}
}
} else {
@@ -2133,47 +2197,43 @@ MaybeLocal<Value> Shell::DeserializeValue(Isolate* isolate,
break;
case kSerializationTagString: {
int length = data.Read<int>(offset);
- static char s_buffer[128];
- char* p = s_buffer;
- bool allocated = false;
- if (length > static_cast<int>(sizeof(s_buffer))) {
- p = new char[length];
- allocated = true;
- }
- data.ReadMemory(p, length, offset);
+ CHECK(length >= 0);
+ std::vector<char> buffer(length + 1); // + 1 so it is never empty.
+ data.ReadMemory(&buffer[0], length, offset);
MaybeLocal<String> str =
- String::NewFromUtf8(isolate, p, String::kNormalString, length);
+ String::NewFromUtf8(isolate, &buffer[0], NewStringType::kNormal,
+ length).ToLocalChecked();
if (!str.IsEmpty()) result = str.ToLocalChecked();
- if (allocated) delete[] p;
break;
}
case kSerializationTagArray: {
uint32_t length = data.Read<uint32_t>(offset);
- Handle<Array> array = Array::New(isolate, length);
+ Local<Array> array = Array::New(isolate, length);
for (uint32_t i = 0; i < length; ++i) {
Local<Value> element_value;
CHECK(DeserializeValue(isolate, data, offset).ToLocal(&element_value));
- array->Set(i, element_value);
+ array->Set(isolate->GetCurrentContext(), i, element_value).FromJust();
}
result = array;
break;
}
case kSerializationTagObject: {
int length = data.Read<int>(offset);
- Handle<Object> object = Object::New(isolate);
+ Local<Object> object = Object::New(isolate);
for (int i = 0; i < length; ++i) {
Local<Value> property_name;
CHECK(DeserializeValue(isolate, data, offset).ToLocal(&property_name));
Local<Value> property_value;
CHECK(DeserializeValue(isolate, data, offset).ToLocal(&property_value));
- object->Set(property_name, property_value);
+ object->Set(isolate->GetCurrentContext(), property_name, property_value)
+ .FromJust();
}
result = object;
break;
}
case kSerializationTagArrayBuffer: {
- int byte_length = data.Read<int>(offset);
- Handle<ArrayBuffer> array_buffer = ArrayBuffer::New(isolate, byte_length);
+ int32_t byte_length = data.Read<int32_t>(offset);
+ Local<ArrayBuffer> array_buffer = ArrayBuffer::New(isolate, byte_length);
ArrayBuffer::Contents contents = array_buffer->GetContents();
DCHECK(static_cast<size_t>(byte_length) == contents.ByteLength());
data.ReadMemory(contents.Data(), byte_length, offset);
@@ -2183,8 +2243,8 @@ MaybeLocal<Value> Shell::DeserializeValue(Isolate* isolate,
case kSerializationTagTransferredArrayBuffer: {
ArrayBuffer::Contents contents;
data.ReadArrayBufferContents(&contents, offset);
- result =
- ArrayBuffer::New(isolate, contents.Data(), contents.ByteLength());
+ result = ArrayBuffer::New(isolate, contents.Data(), contents.ByteLength(),
+ ArrayBufferCreationMode::kInternalized);
break;
}
case kSerializationTagTransferredSharedArrayBuffer: {
@@ -2208,7 +2268,7 @@ void Shell::CleanupWorkers() {
// create a new Worker, it would deadlock.
i::List<Worker*> workers_copy;
{
- base::LockGuard<base::Mutex> lock_guard(&workers_mutex_);
+ base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
allow_new_workers_ = false;
workers_copy.AddAll(workers_);
workers_.Clear();
@@ -2216,15 +2276,13 @@ void Shell::CleanupWorkers() {
for (int i = 0; i < workers_copy.length(); ++i) {
Worker* worker = workers_copy[i];
- worker->Terminate();
+ worker->WaitForThread();
delete worker;
}
// Now that all workers are terminated, we can re-enable Worker creation.
- {
- base::LockGuard<base::Mutex> lock_guard(&workers_mutex_);
- allow_new_workers_ = true;
- }
+ base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
+ allow_new_workers_ = true;
for (int i = 0; i < externalized_shared_contents_.length(); ++i) {
const SharedArrayBuffer::Contents& contents =
@@ -2352,12 +2410,10 @@ int Shell::Main(int argc, char* argv[]) {
}
#endif
Isolate* isolate = Isolate::New(create_params);
- DumbLineEditor dumb_line_editor(isolate);
{
Isolate::Scope scope(isolate);
Initialize(isolate);
PerIsolateData data(isolate);
- InitializeDebugger(isolate);
#ifndef V8_SHARED
if (options.dump_heap_constants) {
@@ -2370,35 +2426,36 @@ int Shell::Main(int argc, char* argv[]) {
Testing::SetStressRunType(options.stress_opt
? Testing::kStressTypeOpt
: Testing::kStressTypeDeopt);
- int stress_runs = Testing::GetStressRuns();
- for (int i = 0; i < stress_runs && result == 0; i++) {
- printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
+ options.stress_runs = Testing::GetStressRuns();
+ for (int i = 0; i < options.stress_runs && result == 0; i++) {
+ printf("============ Stress %d/%d ============\n", i + 1,
+ options.stress_runs);
Testing::PrepareStressRun(i);
- options.last_run = (i == stress_runs - 1);
- result = RunMain(isolate, argc, argv);
+ bool last_run = i == options.stress_runs - 1;
+ result = RunMain(isolate, argc, argv, last_run);
}
printf("======== Full Deoptimization =======\n");
Testing::DeoptimizeAll();
#if !defined(V8_SHARED)
} else if (i::FLAG_stress_runs > 0) {
- int stress_runs = i::FLAG_stress_runs;
- for (int i = 0; i < stress_runs && result == 0; i++) {
- printf("============ Run %d/%d ============\n", i + 1, stress_runs);
- options.last_run = (i == stress_runs - 1);
- result = RunMain(isolate, argc, argv);
+ options.stress_runs = i::FLAG_stress_runs;
+ for (int i = 0; i < options.stress_runs && result == 0; i++) {
+ printf("============ Run %d/%d ============\n", i + 1,
+ options.stress_runs);
+ bool last_run = i == options.stress_runs - 1;
+ result = RunMain(isolate, argc, argv, last_run);
}
#endif
} else {
- result = RunMain(isolate, argc, argv);
+ bool last_run = true;
+ result = RunMain(isolate, argc, argv, last_run);
}
// Run interactive shell if explicitly requested or if no script has been
// executed, but never on --test
if (options.use_interactive_shell()) {
#ifndef V8_SHARED
- if (!i::FLAG_debugger) {
- InstallUtilityScript(isolate);
- }
+ InstallUtilityScript(isolate);
#endif // !V8_SHARED
RunShell(isolate);
}
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index e92a321990..f7e36dd9c9 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -28,7 +28,6 @@
{
'variables': {
'v8_code': 1,
- 'console%': '',
# Enable support for Intel VTune. Supported on ia32/x64 only
'v8_enable_vtunejit%': 0,
'v8_enable_i18n_support%': 1,
@@ -51,14 +50,14 @@
'd8.h',
'd8.cc',
],
+ 'defines': [
+ # TODO(jochen): Remove again after this is globally turned on.
+ 'V8_IMMINENT_DEPRECATION_WARNINGS',
+ ],
'conditions': [
[ 'want_separate_host_toolset==1', {
'toolsets': [ '<(v8_toolset_for_d8)', ],
}],
- [ 'console=="readline"', {
- 'libraries': [ '-lreadline', ],
- 'sources': [ 'd8-readline.cc' ],
- }],
['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
or OS=="openbsd" or OS=="solaris" or OS=="android" \
or OS=="qnx" or OS=="aix")', {
@@ -69,8 +68,6 @@
}],
[ 'component!="shared_library"', {
'sources': [
- 'd8-debug.h',
- 'd8-debug.cc',
'<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
],
'conditions': [
@@ -101,6 +98,9 @@
'<(icu_gyp_path):icudata',
],
}],
+ ['v8_wasm!=0', {
+ 'include_dirs': ['../third_party/wasm'],
+ }],
],
},
{
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 4d723473ea..16f612c97a 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -7,15 +7,17 @@
#ifndef V8_SHARED
#include "src/allocation.h"
+#include "src/base/platform/time.h"
#include "src/hashmap.h"
#include "src/list.h"
-#include "src/smart-pointers.h"
-#include "src/v8.h"
#else
#include "include/v8.h"
#include "src/base/compiler-specific.h"
#endif // !V8_SHARED
+#include "src/base/once.h"
+
+
namespace v8 {
@@ -91,26 +93,6 @@ class CounterMap {
#endif // !V8_SHARED
-class LineEditor {
- public:
- enum Type { DUMB = 0, READLINE = 1 };
- LineEditor(Type type, const char* name);
- virtual ~LineEditor() { }
-
- virtual Handle<String> Prompt(const char* prompt) = 0;
- virtual bool Open(Isolate* isolate) { return true; }
- virtual bool Close() { return true; }
- virtual void AddHistory(const char* str) { }
-
- const char* name() { return name_; }
- static LineEditor* Get() { return current_; }
- private:
- Type type_;
- const char* name_;
- static LineEditor* current_;
-};
-
-
class SourceGroup {
public:
SourceGroup() :
@@ -137,6 +119,7 @@ class SourceGroup {
#ifndef V8_SHARED
void StartExecuteInThread();
void WaitForThread();
+ void JoinThread();
private:
class IsolateThread : public base::Thread {
@@ -161,7 +144,7 @@ class SourceGroup {
#endif // !V8_SHARED
void ExitShell(int exit_code);
- Handle<String> ReadFile(Isolate* isolate, const char* name);
+ Local<String> ReadFile(Isolate* isolate, const char* name);
const char** argv_;
int begin_offset_;
@@ -215,9 +198,9 @@ class SerializationData {
}
private:
- i::List<uint8_t> data;
- i::List<ArrayBuffer::Contents> array_buffer_contents;
- i::List<SharedArrayBuffer::Contents> shared_array_buffer_contents;
+ i::List<uint8_t> data_;
+ i::List<ArrayBuffer::Contents> array_buffer_contents_;
+ i::List<SharedArrayBuffer::Contents> shared_array_buffer_contents_;
};
@@ -239,10 +222,26 @@ class Worker {
Worker();
~Worker();
- void StartExecuteInThread(Isolate* isolate, const char* script);
+ // Run the given script on this Worker. This function should only be called
+ // once, and should only be called by the thread that created the Worker.
+ void StartExecuteInThread(const char* script);
+ // Post a message to the worker's incoming message queue. The worker will
+ // take ownership of the SerializationData.
+ // This function should only be called by the thread that created the Worker.
void PostMessage(SerializationData* data);
+ // Synchronously retrieve messages from the worker's outgoing message queue.
+ // If there is no message in the queue, block until a message is available.
+ // If there are no messages in the queue and the worker is no longer running,
+ // return nullptr.
+ // This function should only be called by the thread that created the Worker.
SerializationData* GetMessage();
+ // Terminate the worker's event loop. Messages from the worker that have been
+ // queued can still be read via GetMessage().
+ // This function can be called by any thread.
void Terminate();
+ // Terminate and join the thread.
+ // This function can be called by any thread.
+ void WaitForThread();
private:
class WorkerThread : public base::Thread {
@@ -257,10 +256,7 @@ class Worker {
Worker* worker_;
};
- enum State { IDLE, RUNNING, TERMINATED };
-
void ExecuteInThread();
- void Cleanup();
static void PostMessageOut(const v8::FunctionCallbackInfo<v8::Value>& args);
base::Semaphore in_semaphore_;
@@ -269,7 +265,7 @@ class Worker {
SerializationDataQueue out_queue_;
base::Thread* thread_;
char* script_;
- base::Atomic32 state_;
+ base::Atomic32 running_;
};
#endif // !V8_SHARED
@@ -278,12 +274,12 @@ class ShellOptions {
public:
ShellOptions()
: script_executed(false),
- last_run(true),
send_idle_notification(false),
invoke_weak_callbacks(false),
omit_quit(false),
stress_opt(false),
stress_deopt(false),
+ stress_runs(1),
interactive_shell(false),
test_shell(false),
dump_heap_constants(false),
@@ -305,12 +301,12 @@ class ShellOptions {
}
bool script_executed;
- bool last_run;
bool send_idle_notification;
bool invoke_weak_callbacks;
bool omit_quit;
bool stress_opt;
bool stress_deopt;
+ int stress_runs;
bool interactive_shell;
bool test_shell;
bool dump_heap_constants;
@@ -333,29 +329,30 @@ class Shell : public i::AllStatic {
public:
enum SourceType { SCRIPT, MODULE };
- static Local<Script> CompileString(
+ static MaybeLocal<Script> CompileString(
Isolate* isolate, Local<String> source, Local<Value> name,
v8::ScriptCompiler::CompileOptions compile_options,
SourceType source_type);
- static bool ExecuteString(Isolate* isolate, Handle<String> source,
- Handle<Value> name, bool print_result,
+ static bool ExecuteString(Isolate* isolate, Local<String> source,
+ Local<Value> name, bool print_result,
bool report_exceptions,
SourceType source_type = SCRIPT);
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(Isolate* isolate, TryCatch* try_catch);
- static Handle<String> ReadFile(Isolate* isolate, const char* name);
+ static Local<String> ReadFile(Isolate* isolate, const char* name);
static Local<Context> CreateEvaluationContext(Isolate* isolate);
- static int RunMain(Isolate* isolate, int argc, char* argv[]);
+ static int RunMain(Isolate* isolate, int argc, char* argv[], bool last_run);
static int Main(int argc, char* argv[]);
static void Exit(int exit_code);
static void OnExit(Isolate* isolate);
static void CollectGarbage(Isolate* isolate);
+ static void EmptyMessageQueues(Isolate* isolate);
#ifndef V8_SHARED
// TODO(binji): stupid implementation for now. Is there an easy way to hash an
// object for use in i::HashMap? By pointer?
- typedef i::List<Handle<Object>> ObjectList;
- static bool SerializeValue(Isolate* isolate, Handle<Value> value,
+ typedef i::List<Local<Object>> ObjectList;
+ static bool SerializeValue(Isolate* isolate, Local<Value> value,
const ObjectList& to_transfer,
ObjectList* seen_objects,
SerializationData* out_data);
@@ -363,9 +360,6 @@ class Shell : public i::AllStatic {
const SerializationData& data,
int* offset);
static void CleanupWorkers();
- static Handle<Array> GetCompletions(Isolate* isolate,
- Handle<String> text,
- Handle<String> full);
static int* LookupCounter(const char* name);
static void* CreateHistogram(const char* name,
int min,
@@ -374,11 +368,6 @@ class Shell : public i::AllStatic {
static void AddHistogramSample(void* histogram, int sample);
static void MapCounters(v8::Isolate* isolate, const char* name);
- static Local<Object> DebugMessageDetails(Isolate* isolate,
- Handle<String> message);
- static Local<Value> DebugCommandToJSONRequest(Isolate* isolate,
- Handle<String> command);
-
static void PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args);
#endif // !V8_SHARED
@@ -397,11 +386,12 @@ class Shell : public i::AllStatic {
static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Write(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args);
static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Read(const v8::FunctionCallbackInfo<v8::Value>& args);
static void ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
- static Handle<String> ReadFromStdin(Isolate* isolate);
+ static Local<String> ReadFromStdin(Isolate* isolate);
static void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(ReadFromStdin(args.GetIsolate()));
}
@@ -446,26 +436,27 @@ class Shell : public i::AllStatic {
static void RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static void AddOSMethods(v8::Isolate* isolate,
- Handle<ObjectTemplate> os_template);
+ Local<ObjectTemplate> os_template);
static const char* kPrompt;
static ShellOptions options;
static ArrayBuffer::Allocator* array_buffer_allocator;
private:
- static Persistent<Context> evaluation_context_;
+ static Global<Context> evaluation_context_;
+ static base::OnceType quit_once_;
#ifndef V8_SHARED
- static Persistent<Context> utility_context_;
+ static Global<Context> utility_context_;
static CounterMap* counter_map_;
// We statically allocate a set of local counters to be used if we
// don't want to store the stats in a memory-mapped file
static CounterCollection local_counters_;
static CounterCollection* counters_;
static base::OS::MemoryMappedFile* counters_file_;
- static base::Mutex context_mutex_;
+ static base::LazyMutex context_mutex_;
static const base::TimeTicks kInitialTicks;
- static base::Mutex workers_mutex_;
+ static base::LazyMutex workers_mutex_;
static bool allow_new_workers_;
static i::List<Worker*> workers_;
static i::List<SharedArrayBuffer::Contents> externalized_shared_contents_;
@@ -474,10 +465,9 @@ class Shell : public i::AllStatic {
static void InstallUtilityScript(Isolate* isolate);
#endif // !V8_SHARED
static void Initialize(Isolate* isolate);
- static void InitializeDebugger(Isolate* isolate);
static void RunShell(Isolate* isolate);
static bool SetOptions(int argc, char* argv[]);
- static Handle<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
+ static Local<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
};
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js
index 2b927aface..8d55c788e2 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8.js
@@ -4,1963 +4,6 @@
"use strict";
-String.prototype.startsWith = function (str) {
- if (str.length > this.length) {
- return false;
- }
- return this.substr(0, str.length) == str;
-};
-
-function log10(num) {
- return Math.log(num)/Math.log(10);
-}
-
-function ToInspectableObject(obj) {
- if (!obj && typeof obj === 'object') {
- return UNDEFINED;
- } else {
- return Object(obj);
- }
-}
-
-function GetCompletions(global, last, full) {
- var full_tokens = full.split();
- full = full_tokens.pop();
- var parts = full.split('.');
- parts.pop();
- var current = global;
- for (var i = 0; i < parts.length; i++) {
- var part = parts[i];
- var next = current[part];
- if (!next) {
- return [];
- }
- current = next;
- }
- var result = [];
- current = ToInspectableObject(current);
- while (typeof current !== 'undefined') {
- var mirror = new $debug.ObjectMirror(current);
- var properties = mirror.properties();
- for (var i = 0; i < properties.length; i++) {
- var name = properties[i].name();
- if (typeof name === 'string' && name.startsWith(last)) {
- result.push(name);
- }
- }
- current = ToInspectableObject(Object.getPrototypeOf(current));
- }
- return result;
-}
-
-
-// Global object holding debugger related constants and state.
-var Debug = {};
-
-
-// Debug events which can occour in the V8 JavaScript engine. These originate
-// from the API include file v8-debug.h.
-Debug.DebugEvent = { Break: 1,
- Exception: 2,
- NewFunction: 3,
- BeforeCompile: 4,
- AfterCompile: 5 };
-
-
-// The different types of scripts matching enum ScriptType in objects.h.
-Debug.ScriptType = { Native: 0,
- Extension: 1,
- Normal: 2 };
-
-
-// The different types of script compilations matching enum
-// Script::CompilationType in objects.h.
-Debug.ScriptCompilationType = { Host: 0,
- Eval: 1,
- JSON: 2 };
-
-
-// The different types of scopes matching constants runtime.cc.
-Debug.ScopeType = { Global: 0,
- Local: 1,
- With: 2,
- Closure: 3,
- Catch: 4,
- Block: 5 };
-
-
-// Current debug state.
-var kNoFrame = -1;
-Debug.State = {
- currentFrame: kNoFrame,
- displaySourceStartLine: -1,
- displaySourceEndLine: -1,
- currentSourceLine: -1
-};
-var trace_compile = false; // Tracing all compile events?
-var trace_debug_json = false; // Tracing all debug json packets?
-var last_cmd = '';
-var repeat_cmd_line = '';
-var is_running = true;
-// Global variable used to store whether a handle was requested.
-var lookup_handle = null;
-
-// Copied from debug-delay.js. This is needed below:
-function ScriptTypeFlag(type) {
- return (1 << type);
-}
-
-
-// Process a debugger JSON message into a display text and a running status.
-// This function returns an object with properties "text" and "running" holding
-// this information.
-function DebugMessageDetails(message) {
- if (trace_debug_json) {
- print("received: '" + message + "'");
- }
- // Convert the JSON string to an object.
- var response = new ProtocolPackage(message);
- is_running = response.running();
-
- if (response.type() == 'event') {
- return DebugEventDetails(response);
- } else {
- return DebugResponseDetails(response);
- }
-}
-
-function DebugEventDetails(response) {
- var details = {text:'', running:false};
-
- // Get the running state.
- details.running = response.running();
-
- var body = response.body();
- var result = '';
- switch (response.event()) {
- case 'break':
- if (body.breakpoints) {
- result += 'breakpoint';
- if (body.breakpoints.length > 1) {
- result += 's';
- }
- result += ' #';
- for (var i = 0; i < body.breakpoints.length; i++) {
- if (i > 0) {
- result += ', #';
- }
- result += body.breakpoints[i];
- }
- } else {
- result += 'break';
- }
- result += ' in ';
- result += body.invocationText;
- result += ', ';
- result += SourceInfo(body);
- result += '\n';
- result += SourceUnderline(body.sourceLineText, body.sourceColumn);
- Debug.State.currentSourceLine = body.sourceLine;
- Debug.State.displaySourceStartLine = -1;
- Debug.State.displaySourceEndLine = -1;
- Debug.State.currentFrame = 0;
- details.text = result;
- break;
-
- case 'exception':
- if (body.uncaught) {
- result += 'Uncaught: ';
- } else {
- result += 'Exception: ';
- }
- result += '"';
- result += body.exception.text;
- result += '"';
- if (body.sourceLine >= 0) {
- result += ', ';
- result += SourceInfo(body);
- result += '\n';
- result += SourceUnderline(body.sourceLineText, body.sourceColumn);
- Debug.State.currentSourceLine = body.sourceLine;
- Debug.State.displaySourceStartLine = -1;
- Debug.State.displaySourceEndLine = -1;
- Debug.State.currentFrame = 0;
- } else {
- result += ' (empty stack)';
- Debug.State.currentSourceLine = -1;
- Debug.State.displaySourceStartLine = -1;
- Debug.State.displaySourceEndLine = -1;
- Debug.State.currentFrame = kNoFrame;
- }
- details.text = result;
- break;
-
- case 'afterCompile':
- if (trace_compile) {
- result = 'Source ' + body.script.name + ' compiled:\n';
- var source = body.script.source;
- if (!(source[source.length - 1] == '\n')) {
- result += source;
- } else {
- result += source.substring(0, source.length - 1);
- }
- }
- details.text = result;
- break;
-
- default:
- details.text = 'Unknown debug event ' + response.event();
- }
-
- return details;
-}
-
-
-function SourceInfo(body) {
- var result = '';
-
- if (body.script) {
- if (body.script.name) {
- result += body.script.name;
- } else {
- result += '[unnamed]';
- }
- }
- result += ' line ';
- result += body.sourceLine + 1;
- result += ' column ';
- result += body.sourceColumn + 1;
-
- return result;
-}
-
-
-function SourceUnderline(source_text, position) {
- if (!source_text) {
- return;
- }
-
- // Create an underline with a caret pointing to the source position. If the
- // source contains a tab character the underline will have a tab character in
- // the same place otherwise the underline will have a space character.
- var underline = '';
- for (var i = 0; i < position; i++) {
- if (source_text[i] == '\t') {
- underline += '\t';
- } else {
- underline += ' ';
- }
- }
- underline += '^';
-
- // Return the source line text with the underline beneath.
- return source_text + '\n' + underline;
-}
-
-
-// Converts a text command to a JSON request.
-function DebugCommandToJSONRequest(cmd_line) {
- var result = new DebugRequest(cmd_line).JSONRequest();
- if (trace_debug_json && result) {
- print("sending: '" + result + "'");
- }
- return result;
-}
-
-
-function DebugRequest(cmd_line) {
- // If the very first character is a { assume that a JSON request have been
- // entered as a command. Converting that to a JSON request is trivial.
- if (cmd_line && cmd_line.length > 0 && cmd_line.charAt(0) == '{') {
- this.request_ = cmd_line;
- return;
- }
-
- // Check for a simple carriage return to repeat the last command:
- var is_repeating = false;
- if (cmd_line == '\n') {
- if (is_running) {
- cmd_line = 'break'; // Not in debugger mode, break with a frame request.
- } else {
- cmd_line = repeat_cmd_line; // use command to repeat.
- is_repeating = true;
- }
- }
- if (!is_running) { // Only save the command if in debugger mode.
- repeat_cmd_line = cmd_line; // save last command.
- }
-
- // Trim string for leading and trailing whitespace.
- cmd_line = cmd_line.replace(/^\s+|\s+$/g, '');
-
- // Find the command.
- var pos = cmd_line.indexOf(' ');
- var cmd;
- var args;
- if (pos == -1) {
- cmd = cmd_line;
- args = '';
- } else {
- cmd = cmd_line.slice(0, pos);
- args = cmd_line.slice(pos).replace(/^\s+|\s+$/g, '');
- }
-
- if ((cmd === undefined) || !cmd) {
- this.request_ = UNDEFINED;
- return;
- }
-
- last_cmd = cmd;
-
- // Switch on command.
- switch (cmd) {
- case 'continue':
- case 'c':
- this.request_ = this.continueCommandToJSONRequest_(args);
- break;
-
- case 'step':
- case 's':
- this.request_ = this.stepCommandToJSONRequest_(args, 'in');
- break;
-
- case 'stepi':
- case 'si':
- this.request_ = this.stepCommandToJSONRequest_(args, 'min');
- break;
-
- case 'next':
- case 'n':
- this.request_ = this.stepCommandToJSONRequest_(args, 'next');
- break;
-
- case 'finish':
- case 'fin':
- this.request_ = this.stepCommandToJSONRequest_(args, 'out');
- break;
-
- case 'backtrace':
- case 'bt':
- this.request_ = this.backtraceCommandToJSONRequest_(args);
- break;
-
- case 'frame':
- case 'f':
- this.request_ = this.frameCommandToJSONRequest_(args);
- break;
-
- case 'scopes':
- this.request_ = this.scopesCommandToJSONRequest_(args);
- break;
-
- case 'scope':
- this.request_ = this.scopeCommandToJSONRequest_(args);
- break;
-
- case 'disconnect':
- case 'exit':
- case 'quit':
- this.request_ = this.disconnectCommandToJSONRequest_(args);
- break;
-
- case 'up':
- this.request_ =
- this.frameCommandToJSONRequest_('' +
- (Debug.State.currentFrame + 1));
- break;
-
- case 'down':
- case 'do':
- this.request_ =
- this.frameCommandToJSONRequest_('' +
- (Debug.State.currentFrame - 1));
- break;
-
- case 'set':
- case 'print':
- case 'p':
- this.request_ = this.printCommandToJSONRequest_(args);
- break;
-
- case 'dir':
- this.request_ = this.dirCommandToJSONRequest_(args);
- break;
-
- case 'references':
- this.request_ = this.referencesCommandToJSONRequest_(args);
- break;
-
- case 'instances':
- this.request_ = this.instancesCommandToJSONRequest_(args);
- break;
-
- case 'list':
- case 'l':
- this.request_ = this.listCommandToJSONRequest_(args);
- break;
- case 'source':
- this.request_ = this.sourceCommandToJSONRequest_(args);
- break;
-
- case 'scripts':
- case 'script':
- case 'scr':
- this.request_ = this.scriptsCommandToJSONRequest_(args);
- break;
-
- case 'break':
- case 'b':
- this.request_ = this.breakCommandToJSONRequest_(args);
- break;
-
- case 'breakpoints':
- case 'bb':
- this.request_ = this.breakpointsCommandToJSONRequest_(args);
- break;
-
- case 'clear':
- case 'delete':
- case 'd':
- this.request_ = this.clearCommandToJSONRequest_(args);
- break;
-
- case 'threads':
- this.request_ = this.threadsCommandToJSONRequest_(args);
- break;
-
- case 'cond':
- this.request_ = this.changeBreakpointCommandToJSONRequest_(args, 'cond');
- break;
-
- case 'enable':
- case 'en':
- this.request_ =
- this.changeBreakpointCommandToJSONRequest_(args, 'enable');
- break;
-
- case 'disable':
- case 'dis':
- this.request_ =
- this.changeBreakpointCommandToJSONRequest_(args, 'disable');
- break;
-
- case 'ignore':
- this.request_ =
- this.changeBreakpointCommandToJSONRequest_(args, 'ignore');
- break;
-
- case 'info':
- case 'inf':
- this.request_ = this.infoCommandToJSONRequest_(args);
- break;
-
- case 'flags':
- this.request_ = this.v8FlagsToJSONRequest_(args);
- break;
-
- case 'gc':
- this.request_ = this.gcToJSONRequest_(args);
- break;
-
- case 'trace':
- case 'tr':
- // Return undefined to indicate command handled internally (no JSON).
- this.request_ = UNDEFINED;
- this.traceCommand_(args);
- break;
-
- case 'help':
- case '?':
- this.helpCommand_(args);
- // Return undefined to indicate command handled internally (no JSON).
- this.request_ = UNDEFINED;
- break;
-
- default:
- throw new Error('Unknown command "' + cmd + '"');
- }
-}
-
-DebugRequest.prototype.JSONRequest = function() {
- return this.request_;
-};
-
-
-function RequestPacket(command) {
- this.seq = 0;
- this.type = 'request';
- this.command = command;
-}
-
-
-RequestPacket.prototype.toJSONProtocol = function() {
- // Encode the protocol header.
- var json = '{';
- json += '"seq":' + this.seq;
- json += ',"type":"' + this.type + '"';
- if (this.command) {
- json += ',"command":' + JSON.stringify(this.command);
- }
- if (this.arguments) {
- json += ',"arguments":';
- // Encode the arguments part.
- if (this.arguments.toJSONProtocol) {
- json += this.arguments.toJSONProtocol();
- } else {
- json += JSON.stringify(this.arguments);
- }
- }
- json += '}';
- return json;
-};
-
-
-DebugRequest.prototype.createRequest = function(command) {
- return new RequestPacket(command);
-};
-
-
-// Create a JSON request for the evaluation command.
-DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
- lookup_handle = null;
-
- // Check if the expression is a handle id in the form #<handle>#.
- var handle_match = expression.match(/^#([0-9]*)#$/);
- if (handle_match) {
- // Remember the handle requested in a global variable.
- lookup_handle = parseInt(handle_match[1]);
- // Build a lookup request.
- var request = this.createRequest('lookup');
- request.arguments = {};
- request.arguments.handles = [ lookup_handle ];
- return request.toJSONProtocol();
- } else {
- // Build an evaluate request.
- var request = this.createRequest('evaluate');
- request.arguments = {};
- request.arguments.expression = expression;
- // Request a global evaluation if there is no current frame.
- if (Debug.State.currentFrame == kNoFrame) {
- request.arguments.global = true;
- }
- return request.toJSONProtocol();
- }
-};
-
-
-// Create a JSON request for the references/instances command.
-DebugRequest.prototype.makeReferencesJSONRequest_ = function(handle, type) {
- // Build a references request.
- var handle_match = handle.match(/^#([0-9]*)#$/);
- if (handle_match) {
- var request = this.createRequest('references');
- request.arguments = {};
- request.arguments.type = type;
- request.arguments.handle = parseInt(handle_match[1]);
- return request.toJSONProtocol();
- } else {
- throw new Error('Invalid object id.');
- }
-};
-
-
-// Create a JSON request for the continue command.
-DebugRequest.prototype.continueCommandToJSONRequest_ = function(args) {
- var request = this.createRequest('continue');
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the step command.
-DebugRequest.prototype.stepCommandToJSONRequest_ = function(args, type) {
- // Requesting a step is through the continue command with additional
- // arguments.
- var request = this.createRequest('continue');
- request.arguments = {};
-
- // Process arguments if any.
-
- // Only process args if the command is 'step' which is indicated by type being
- // set to 'in'. For all other commands, ignore the args.
- if (args && args.length > 0) {
- args = args.split(/\s+/g);
-
- if (args.length > 2) {
- throw new Error('Invalid step arguments.');
- }
-
- if (args.length > 0) {
- // Check if we have a gdb stype step command. If so, the 1st arg would
- // be the step count. If it's not a number, then assume that we're
- // parsing for the legacy v8 step command.
- var stepcount = Number(args[0]);
- if (stepcount == Number.NaN) {
- // No step count at arg 1. Process as legacy d8 step command:
- if (args.length == 2) {
- var stepcount = parseInt(args[1]);
- if (isNaN(stepcount) || stepcount <= 0) {
- throw new Error('Invalid step count argument "' + args[0] + '".');
- }
- request.arguments.stepcount = stepcount;
- }
-
- // Get the step action.
- switch (args[0]) {
- case 'in':
- case 'i':
- request.arguments.stepaction = 'in';
- break;
-
- case 'min':
- case 'm':
- request.arguments.stepaction = 'min';
- break;
-
- case 'next':
- case 'n':
- request.arguments.stepaction = 'next';
- break;
-
- case 'out':
- case 'o':
- request.arguments.stepaction = 'out';
- break;
-
- default:
- throw new Error('Invalid step argument "' + args[0] + '".');
- }
-
- } else {
- // gdb style step commands:
- request.arguments.stepaction = type;
- request.arguments.stepcount = stepcount;
- }
- }
- } else {
- // Default is step of the specified type.
- request.arguments.stepaction = type;
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the backtrace command.
-DebugRequest.prototype.backtraceCommandToJSONRequest_ = function(args) {
- // Build a backtrace request from the text command.
- var request = this.createRequest('backtrace');
-
- // Default is to show top 10 frames.
- request.arguments = {};
- request.arguments.fromFrame = 0;
- request.arguments.toFrame = 10;
-
- args = args.split(/\s*[ ]+\s*/g);
- if (args.length == 1 && args[0].length > 0) {
- var frameCount = parseInt(args[0]);
- if (frameCount > 0) {
- // Show top frames.
- request.arguments.fromFrame = 0;
- request.arguments.toFrame = frameCount;
- } else {
- // Show bottom frames.
- request.arguments.fromFrame = 0;
- request.arguments.toFrame = -frameCount;
- request.arguments.bottom = true;
- }
- } else if (args.length == 2) {
- var fromFrame = parseInt(args[0]);
- var toFrame = parseInt(args[1]);
- if (isNaN(fromFrame) || fromFrame < 0) {
- throw new Error('Invalid start frame argument "' + args[0] + '".');
- }
- if (isNaN(toFrame) || toFrame < 0) {
- throw new Error('Invalid end frame argument "' + args[1] + '".');
- }
- if (fromFrame > toFrame) {
- throw new Error('Invalid arguments start frame cannot be larger ' +
- 'than end frame.');
- }
- // Show frame range.
- request.arguments.fromFrame = fromFrame;
- request.arguments.toFrame = toFrame + 1;
- } else if (args.length > 2) {
- throw new Error('Invalid backtrace arguments.');
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the frame command.
-DebugRequest.prototype.frameCommandToJSONRequest_ = function(args) {
- // Build a frame request from the text command.
- var request = this.createRequest('frame');
- args = args.split(/\s*[ ]+\s*/g);
- if (args.length > 0 && args[0].length > 0) {
- request.arguments = {};
- request.arguments.number = args[0];
- }
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the scopes command.
-DebugRequest.prototype.scopesCommandToJSONRequest_ = function(args) {
- // Build a scopes request from the text command.
- var request = this.createRequest('scopes');
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the scope command.
-DebugRequest.prototype.scopeCommandToJSONRequest_ = function(args) {
- // Build a scope request from the text command.
- var request = this.createRequest('scope');
- args = args.split(/\s*[ ]+\s*/g);
- if (args.length > 0 && args[0].length > 0) {
- request.arguments = {};
- request.arguments.number = args[0];
- }
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the print command.
-DebugRequest.prototype.printCommandToJSONRequest_ = function(args) {
- // Build an evaluate request from the text command.
- if (args.length == 0) {
- throw new Error('Missing expression.');
- }
- return this.makeEvaluateJSONRequest_(args);
-};
-
-
-// Create a JSON request for the dir command.
-DebugRequest.prototype.dirCommandToJSONRequest_ = function(args) {
- // Build an evaluate request from the text command.
- if (args.length == 0) {
- throw new Error('Missing expression.');
- }
- return this.makeEvaluateJSONRequest_(args);
-};
-
-
-// Create a JSON request for the references command.
-DebugRequest.prototype.referencesCommandToJSONRequest_ = function(args) {
- // Build an evaluate request from the text command.
- if (args.length == 0) {
- throw new Error('Missing object id.');
- }
-
- return this.makeReferencesJSONRequest_(args, 'referencedBy');
-};
-
-
-// Create a JSON request for the instances command.
-DebugRequest.prototype.instancesCommandToJSONRequest_ = function(args) {
- // Build an evaluate request from the text command.
- if (args.length == 0) {
- throw new Error('Missing object id.');
- }
-
- // Build a references request.
- return this.makeReferencesJSONRequest_(args, 'constructedBy');
-};
-
-
-// Create a JSON request for the list command.
-DebugRequest.prototype.listCommandToJSONRequest_ = function(args) {
-
- // Default is ten lines starting five lines before the current location.
- if (Debug.State.displaySourceEndLine == -1) {
- // If we list forwards, we will start listing after the last source end
- // line. Set it to start from 5 lines before the current location.
- Debug.State.displaySourceEndLine = Debug.State.currentSourceLine - 5;
- // If we list backwards, we will start listing backwards from the last
- // source start line. Set it to start from 1 lines before the current
- // location.
- Debug.State.displaySourceStartLine = Debug.State.currentSourceLine + 1;
- }
-
- var from = Debug.State.displaySourceEndLine + 1;
- var lines = 10;
-
- // Parse the arguments.
- args = args.split(/\s*,\s*/g);
- if (args == '') {
- } else if ((args.length == 1) && (args[0] == '-')) {
- from = Debug.State.displaySourceStartLine - lines;
- } else if (args.length == 2) {
- from = parseInt(args[0]);
- lines = parseInt(args[1]) - from + 1; // inclusive of the ending line.
- } else {
- throw new Error('Invalid list arguments.');
- }
- Debug.State.displaySourceStartLine = from;
- Debug.State.displaySourceEndLine = from + lines - 1;
- var sourceArgs = '' + from + ' ' + lines;
- return this.sourceCommandToJSONRequest_(sourceArgs);
-};
-
-
-// Create a JSON request for the source command.
-DebugRequest.prototype.sourceCommandToJSONRequest_ = function(args) {
- // Build a evaluate request from the text command.
- var request = this.createRequest('source');
-
- // Default is ten lines starting five lines before the current location.
- var from = Debug.State.currentSourceLine - 5;
- var lines = 10;
-
- // Parse the arguments.
- args = args.split(/\s*[ ]+\s*/g);
- if (args.length > 1 && args[0].length > 0 && args[1].length > 0) {
- from = parseInt(args[0]) - 1;
- lines = parseInt(args[1]);
- } else if (args.length > 0 && args[0].length > 0) {
- from = parseInt(args[0]) - 1;
- }
-
- if (from < 0) from = 0;
- if (lines < 0) lines = 10;
-
- // Request source arround current source location.
- request.arguments = {};
- request.arguments.fromLine = from;
- request.arguments.toLine = from + lines;
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the scripts command.
-DebugRequest.prototype.scriptsCommandToJSONRequest_ = function(args) {
- // Build a evaluate request from the text command.
- var request = this.createRequest('scripts');
-
- // Process arguments if any.
- if (args && args.length > 0) {
- args = args.split(/\s*[ ]+\s*/g);
-
- if (args.length > 1) {
- throw new Error('Invalid scripts arguments.');
- }
-
- request.arguments = {};
- switch (args[0]) {
- case 'natives':
- request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Native);
- break;
-
- case 'extensions':
- request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Extension);
- break;
-
- case 'all':
- request.arguments.types =
- ScriptTypeFlag(Debug.ScriptType.Normal) |
- ScriptTypeFlag(Debug.ScriptType.Native) |
- ScriptTypeFlag(Debug.ScriptType.Extension);
- break;
-
- default:
- // If the arg is not one of the know one aboves, then it must be a
- // filter used for filtering the results:
- request.arguments.filter = args[0];
- break;
- }
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the break command.
-DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
- // Build a evaluate request from the text command.
- // Process arguments if any.
- if (args && args.length > 0) {
- var target = args;
- var type = 'function';
- var line;
- var column;
- var condition;
- var pos;
-
- var request = this.createRequest('setbreakpoint');
-
- // Break the args into target spec and condition if appropriate.
-
- // Check for breakpoint condition.
- pos = args.indexOf(' ');
- if (pos > 0) {
- target = args.substring(0, pos);
- condition = args.substring(pos + 1, args.length);
- }
-
- // Check for script breakpoint (name:line[:column]). If no ':' in break
- // specification it is considered a function break point.
- pos = target.indexOf(':');
- if (pos > 0) {
- var tmp = target.substring(pos + 1, target.length);
- target = target.substring(0, pos);
- if (target[0] == '/' && target[target.length - 1] == '/') {
- type = 'scriptRegExp';
- target = target.substring(1, target.length - 1);
- } else {
- type = 'script';
- }
-
- // Check for both line and column.
- pos = tmp.indexOf(':');
- if (pos > 0) {
- column = parseInt(tmp.substring(pos + 1, tmp.length)) - 1;
- line = parseInt(tmp.substring(0, pos)) - 1;
- } else {
- line = parseInt(tmp) - 1;
- }
- } else if (target[0] == '#' && target[target.length - 1] == '#') {
- type = 'handle';
- target = target.substring(1, target.length - 1);
- } else {
- type = 'function';
- }
-
- request.arguments = {};
- request.arguments.type = type;
- request.arguments.target = target;
- request.arguments.line = line;
- request.arguments.column = column;
- request.arguments.condition = condition;
- } else {
- var request = this.createRequest('suspend');
- }
-
- return request.toJSONProtocol();
-};
-
-
-DebugRequest.prototype.breakpointsCommandToJSONRequest_ = function(args) {
- if (args && args.length > 0) {
- throw new Error('Unexpected arguments.');
- }
- var request = this.createRequest('listbreakpoints');
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the clear command.
-DebugRequest.prototype.clearCommandToJSONRequest_ = function(args) {
- // Build a evaluate request from the text command.
- var request = this.createRequest('clearbreakpoint');
-
- // Process arguments if any.
- if (args && args.length > 0) {
- request.arguments = {};
- request.arguments.breakpoint = parseInt(args);
- } else {
- throw new Error('Invalid break arguments.');
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the change breakpoint command.
-DebugRequest.prototype.changeBreakpointCommandToJSONRequest_ =
- function(args, command) {
-
- var request;
-
- // Check for exception breaks first:
- // en[able] exc[eptions] [all|unc[aught]]
- // en[able] [all|unc[aught]] exc[eptions]
- // dis[able] exc[eptions] [all|unc[aught]]
- // dis[able] [all|unc[aught]] exc[eptions]
- if ((command == 'enable' || command == 'disable') &&
- args && args.length > 1) {
- var nextPos = args.indexOf(' ');
- var arg1 = (nextPos > 0) ? args.substring(0, nextPos) : args;
- var excType = null;
-
- // Check for:
- // en[able] exc[eptions] [all|unc[aught]]
- // dis[able] exc[eptions] [all|unc[aught]]
- if (arg1 == 'exc' || arg1 == 'exception' || arg1 == 'exceptions') {
-
- var arg2 = (nextPos > 0) ?
- args.substring(nextPos + 1, args.length) : 'all';
- if (!arg2) {
- arg2 = 'all'; // if unspecified, set for all.
- } else if (arg2 == 'unc') { // check for short cut.
- arg2 = 'uncaught';
- }
- excType = arg2;
-
- // Check for:
- // en[able] [all|unc[aught]] exc[eptions]
- // dis[able] [all|unc[aught]] exc[eptions]
- } else if (arg1 == 'all' || arg1 == 'unc' || arg1 == 'uncaught') {
-
- var arg2 = (nextPos > 0) ?
- args.substring(nextPos + 1, args.length) : null;
- if (arg2 == 'exc' || arg1 == 'exception' || arg1 == 'exceptions') {
- excType = arg1;
- if (excType == 'unc') {
- excType = 'uncaught';
- }
- }
- }
-
- // If we matched one of the command formats, then excType will be non-null:
- if (excType) {
- // Build a evaluate request from the text command.
- request = this.createRequest('setexceptionbreak');
-
- request.arguments = {};
- request.arguments.type = excType;
- request.arguments.enabled = (command == 'enable');
-
- return request.toJSONProtocol();
- }
- }
-
- // Build a evaluate request from the text command.
- request = this.createRequest('changebreakpoint');
-
- // Process arguments if any.
- if (args && args.length > 0) {
- request.arguments = {};
- var pos = args.indexOf(' ');
- var breakpointArg = args;
- var otherArgs;
- if (pos > 0) {
- breakpointArg = args.substring(0, pos);
- otherArgs = args.substring(pos + 1, args.length);
- }
-
- request.arguments.breakpoint = parseInt(breakpointArg);
-
- switch(command) {
- case 'cond':
- request.arguments.condition = otherArgs ? otherArgs : null;
- break;
- case 'enable':
- request.arguments.enabled = true;
- break;
- case 'disable':
- request.arguments.enabled = false;
- break;
- case 'ignore':
- request.arguments.ignoreCount = parseInt(otherArgs);
- break;
- default:
- throw new Error('Invalid arguments.');
- }
- } else {
- throw new Error('Invalid arguments.');
- }
-
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the disconnect command.
-DebugRequest.prototype.disconnectCommandToJSONRequest_ = function(args) {
- var request;
- request = this.createRequest('disconnect');
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the info command.
-DebugRequest.prototype.infoCommandToJSONRequest_ = function(args) {
- var request;
- if (args && (args == 'break' || args == 'br')) {
- // Build a evaluate request from the text command.
- request = this.createRequest('listbreakpoints');
- last_cmd = 'info break';
- } else if (args && (args == 'locals' || args == 'lo')) {
- // Build a evaluate request from the text command.
- request = this.createRequest('frame');
- last_cmd = 'info locals';
- } else if (args && (args == 'args' || args == 'ar')) {
- // Build a evaluate request from the text command.
- request = this.createRequest('frame');
- last_cmd = 'info args';
- } else {
- throw new Error('Invalid info arguments.');
- }
-
- return request.toJSONProtocol();
-};
-
-
-DebugRequest.prototype.v8FlagsToJSONRequest_ = function(args) {
- var request;
- request = this.createRequest('v8flags');
- request.arguments = {};
- request.arguments.flags = args;
- return request.toJSONProtocol();
-};
-
-
-DebugRequest.prototype.gcToJSONRequest_ = function(args) {
- var request;
- if (!args) {
- args = 'all';
- }
- var args = args.split(/\s+/g);
- var cmd = args[0];
-
- switch(cmd) {
- case 'all':
- case 'quick':
- case 'full':
- case 'young':
- case 'old':
- case 'compact':
- case 'sweep':
- case 'scavenge': {
- if (cmd == 'young') { cmd = 'quick'; }
- else if (cmd == 'old') { cmd = 'full'; }
-
- request = this.createRequest('gc');
- request.arguments = {};
- request.arguments.type = cmd;
- break;
- }
- // Else fall thru to the default case below to report the error.
- default:
- throw new Error('Missing arguments after ' + cmd + '.');
- }
- return request.toJSONProtocol();
-};
-
-
-// Create a JSON request for the threads command.
-DebugRequest.prototype.threadsCommandToJSONRequest_ = function(args) {
- // Build a threads request from the text command.
- var request = this.createRequest('threads');
- return request.toJSONProtocol();
-};
-
-
-// Handle the trace command.
-DebugRequest.prototype.traceCommand_ = function(args) {
- // Process arguments.
- if (args && args.length > 0) {
- if (args == 'compile') {
- trace_compile = !trace_compile;
- print('Tracing of compiled scripts ' + (trace_compile ? 'on' : 'off'));
- } else if (args === 'debug json' || args === 'json' || args === 'packets') {
- trace_debug_json = !trace_debug_json;
- print('Tracing of debug json packets ' +
- (trace_debug_json ? 'on' : 'off'));
- } else {
- throw new Error('Invalid trace arguments.');
- }
- } else {
- throw new Error('Invalid trace arguments.');
- }
-};
-
-// Handle the help command.
-DebugRequest.prototype.helpCommand_ = function(args) {
- // Help os quite simple.
- if (args && args.length > 0) {
- print('warning: arguments to \'help\' are ignored');
- }
-
- print('Note: <> denotes symbollic values to be replaced with real values.');
- print('Note: [] denotes optional parts of commands, or optional options / arguments.');
- print(' e.g. d[elete] - you get the same command if you type d or delete.');
- print('');
- print('[break] - break as soon as possible');
- print('b[reak] location [condition]');
- print(' - break on named function: location is a function name');
- print(' - break on function: location is #<id>#');
- print(' - break on script position: location is name:line[:column]');
- print('');
- print('clear <breakpoint #> - deletes the specified user defined breakpoint');
- print('d[elete] <breakpoint #> - deletes the specified user defined breakpoint');
- print('dis[able] <breakpoint #> - disables the specified user defined breakpoint');
- print('dis[able] exc[eptions] [[all] | unc[aught]]');
- print(' - disables breaking on exceptions');
- print('en[able] <breakpoint #> - enables the specified user defined breakpoint');
- print('en[able] exc[eptions] [[all] | unc[aught]]');
- print(' - enables breaking on exceptions');
- print('');
- print('b[ack]t[race] [n] | [-n] | [from to]');
- print(' - prints the stack back trace');
- print('f[rame] - prints info about the current frame context');
- print('f[rame] <frame #> - set context to specified frame #');
- print('scopes');
- print('scope <scope #>');
- print('');
- print('up - set context to caller of current frame');
- print('do[wn] - set context to callee of current frame');
- print('inf[o] br[eak] - prints info about breakpoints in use');
- print('inf[o] ar[gs] - prints info about arguments of the current function');
- print('inf[o] lo[cals] - prints info about locals in the current function');
- print('');
- print('step [in | next | out| min [step count]]');
- print('c[ontinue] - continue executing after a breakpoint');
- print('s[tep] [<N>] - step into the next N callees (default N is 1)');
- print('s[tep]i [<N>] - step into the next N callees (default N is 1)');
- print('n[ext] [<N>] - step over the next N callees (default N is 1)');
- print('fin[ish] [<N>] - step out of N frames (default N is 1)');
- print('');
- print('p[rint] <expression> - prints the result of the specified expression');
- print('dir <expression> - prints the object structure of the result');
- print('set <var> = <expression> - executes the specified statement');
- print('');
- print('l[ist] - list the source code around for the current pc');
- print('l[ist] [- | <start>,<end>] - list the specified range of source code');
- print('source [from line [num lines]]');
- print('scr[ipts] [native|extensions|all]');
- print('scr[ipts] [<filter text>] - list scripts with the specified text in its description');
- print('');
- print('gc - runs the garbage collector');
- print('');
- print('trace compile');
- // hidden command: trace debug json - toggles tracing of debug json packets
- print('');
- print('disconnect|exit|quit - disconnects and quits the debugger');
- print('help - prints this help information');
-};
-
-
-function formatHandleReference_(value) {
- if (value.handle() >= 0) {
- return '#' + value.handle() + '#';
- } else {
- return '#Transient#';
- }
-}
-
-
-function formatObject_(value, include_properties) {
- var result = '';
- result += formatHandleReference_(value);
- result += ', type: object';
- result += ', constructor ';
- var ctor = value.constructorFunctionValue();
- result += formatHandleReference_(ctor);
- result += ', __proto__ ';
- var proto = value.protoObjectValue();
- result += formatHandleReference_(proto);
- result += ', ';
- result += value.propertyCount();
- result += ' properties.';
- if (include_properties) {
- result += '\n';
- for (var i = 0; i < value.propertyCount(); i++) {
- result += ' ';
- result += value.propertyName(i);
- result += ': ';
- var property_value = value.propertyValue(i);
- if (property_value instanceof ProtocolReference) {
- result += '<no type>';
- } else {
- if (property_value && property_value.type()) {
- result += property_value.type();
- } else {
- result += '<no type>';
- }
- }
- result += ' ';
- result += formatHandleReference_(property_value);
- result += '\n';
- }
- }
- return result;
-}
-
-
-function formatScope_(scope) {
- var result = '';
- var index = scope.index;
- result += '#' + (index <= 9 ? '0' : '') + index;
- result += ' ';
- switch (scope.type) {
- case Debug.ScopeType.Global:
- result += 'Global, ';
- result += '#' + scope.object.ref + '#';
- break;
- case Debug.ScopeType.Local:
- result += 'Local';
- break;
- case Debug.ScopeType.With:
- result += 'With, ';
- result += '#' + scope.object.ref + '#';
- break;
- case Debug.ScopeType.Catch:
- result += 'Catch, ';
- result += '#' + scope.object.ref + '#';
- break;
- case Debug.ScopeType.Closure:
- result += 'Closure';
- break;
- default:
- result += 'UNKNOWN';
- }
- return result;
-}
-
-
-function refObjectToString_(protocolPackage, handle) {
- var value = protocolPackage.lookup(handle);
- var result = '';
- if (value.isString()) {
- result = '"' + value.value() + '"';
- } else if (value.isPrimitive()) {
- result = value.valueString();
- } else if (value.isObject()) {
- result += formatObject_(value, true);
- }
- return result;
-}
-
-
-// Rounds number 'num' to 'length' decimal places.
-function roundNumber(num, length) {
- var factor = Math.pow(10, length);
- return Math.round(num * factor) / factor;
-}
-
-
-// Convert a JSON response to text for display in a text based debugger.
-function DebugResponseDetails(response) {
- var details = { text: '', running: false };
-
- try {
- if (!response.success()) {
- details.text = response.message();
- return details;
- }
-
- // Get the running state.
- details.running = response.running();
-
- var body = response.body();
- var result = '';
- switch (response.command()) {
- case 'suspend':
- details.text = 'stopped';
- break;
-
- case 'setbreakpoint':
- result = 'set breakpoint #';
- result += body.breakpoint;
- details.text = result;
- break;
-
- case 'clearbreakpoint':
- result = 'cleared breakpoint #';
- result += body.breakpoint;
- details.text = result;
- break;
-
- case 'changebreakpoint':
- result = 'successfully changed breakpoint';
- details.text = result;
- break;
-
- case 'listbreakpoints':
- result = 'breakpoints: (' + body.breakpoints.length + ')';
- for (var i = 0; i < body.breakpoints.length; i++) {
- var breakpoint = body.breakpoints[i];
- result += '\n id=' + breakpoint.number;
- result += ' type=' + breakpoint.type;
- if (breakpoint.script_id) {
- result += ' script_id=' + breakpoint.script_id;
- }
- if (breakpoint.script_name) {
- result += ' script_name=' + breakpoint.script_name;
- }
- if (breakpoint.script_regexp) {
- result += ' script_regexp=' + breakpoint.script_regexp;
- }
- result += ' line=' + (breakpoint.line + 1);
- if (breakpoint.column != null) {
- result += ' column=' + (breakpoint.column + 1);
- }
- if (breakpoint.groupId) {
- result += ' groupId=' + breakpoint.groupId;
- }
- if (breakpoint.ignoreCount) {
- result += ' ignoreCount=' + breakpoint.ignoreCount;
- }
- if (breakpoint.active === false) {
- result += ' inactive';
- }
- if (breakpoint.condition) {
- result += ' condition=' + breakpoint.condition;
- }
- result += ' hit_count=' + breakpoint.hit_count;
- }
- if (body.breakpoints.length === 0) {
- result = "No user defined breakpoints\n";
- } else {
- result += '\n';
- }
- if (body.breakOnExceptions) {
- result += '* breaking on ALL exceptions is enabled\n';
- } else if (body.breakOnUncaughtExceptions) {
- result += '* breaking on UNCAUGHT exceptions is enabled\n';
- } else {
- result += '* all exception breakpoints are disabled\n';
- }
- details.text = result;
- break;
-
- case 'setexceptionbreak':
- result = 'Break on ' + body.type + ' exceptions: ';
- result += body.enabled ? 'enabled' : 'disabled';
- details.text = result;
- break;
-
- case 'backtrace':
- if (body.totalFrames == 0) {
- result = '(empty stack)';
- } else {
- var result = 'Frames #' + body.fromFrame + ' to #' +
- (body.toFrame - 1) + ' of ' + body.totalFrames + '\n';
- for (i = 0; i < body.frames.length; i++) {
- if (i != 0) result += '\n';
- result += body.frames[i].text;
- }
- }
- details.text = result;
- break;
-
- case 'frame':
- if (last_cmd === 'info locals') {
- var locals = body.locals;
- if (locals.length === 0) {
- result = 'No locals';
- } else {
- for (var i = 0; i < locals.length; i++) {
- var local = locals[i];
- result += local.name + ' = ';
- result += refObjectToString_(response, local.value.ref);
- result += '\n';
- }
- }
- } else if (last_cmd === 'info args') {
- var args = body.arguments;
- if (args.length === 0) {
- result = 'No arguments';
- } else {
- for (var i = 0; i < args.length; i++) {
- var arg = args[i];
- result += arg.name + ' = ';
- result += refObjectToString_(response, arg.value.ref);
- result += '\n';
- }
- }
- } else {
- result = SourceUnderline(body.sourceLineText,
- body.column);
- Debug.State.currentSourceLine = body.line;
- Debug.State.currentFrame = body.index;
- Debug.State.displaySourceStartLine = -1;
- Debug.State.displaySourceEndLine = -1;
- }
- details.text = result;
- break;
-
- case 'scopes':
- if (body.totalScopes == 0) {
- result = '(no scopes)';
- } else {
- result = 'Scopes #' + body.fromScope + ' to #' +
- (body.toScope - 1) + ' of ' + body.totalScopes + '\n';
- for (i = 0; i < body.scopes.length; i++) {
- if (i != 0) {
- result += '\n';
- }
- result += formatScope_(body.scopes[i]);
- }
- }
- details.text = result;
- break;
-
- case 'scope':
- result += formatScope_(body);
- result += '\n';
- var scope_object_value = response.lookup(body.object.ref);
- result += formatObject_(scope_object_value, true);
- details.text = result;
- break;
-
- case 'evaluate':
- case 'lookup':
- case 'getobj':
- if (last_cmd == 'p' || last_cmd == 'print') {
- result = body.text;
- } else {
- var value;
- if (lookup_handle) {
- value = response.bodyValue(lookup_handle);
- } else {
- value = response.bodyValue();
- }
- if (value.isObject()) {
- result += formatObject_(value, true);
- } else {
- result += 'type: ';
- result += value.type();
- if (!value.isUndefined() && !value.isNull()) {
- result += ', ';
- if (value.isString()) {
- result += '"';
- }
- result += value.value();
- if (value.isString()) {
- result += '"';
- }
- }
- result += '\n';
- }
- }
- details.text = result;
- break;
-
- case 'references':
- var count = body.length;
- result += 'found ' + count + ' objects';
- result += '\n';
- for (var i = 0; i < count; i++) {
- var value = response.bodyValue(i);
- result += formatObject_(value, false);
- result += '\n';
- }
- details.text = result;
- break;
-
- case 'source':
- // Get the source from the response.
- var source = body.source;
- var from_line = body.fromLine + 1;
- var lines = source.split('\n');
- var maxdigits = 1 + Math.floor(log10(from_line + lines.length));
- if (maxdigits < 3) {
- maxdigits = 3;
- }
- var result = '';
- for (var num = 0; num < lines.length; num++) {
- // Check if there's an extra newline at the end.
- if (num == (lines.length - 1) && lines[num].length == 0) {
- break;
- }
-
- var current_line = from_line + num;
- var spacer = maxdigits - (1 + Math.floor(log10(current_line)));
- if (current_line == Debug.State.currentSourceLine + 1) {
- for (var i = 0; i < maxdigits; i++) {
- result += '>';
- }
- result += ' ';
- } else {
- for (var i = 0; i < spacer; i++) {
- result += ' ';
- }
- result += current_line + ': ';
- }
- result += lines[num];
- result += '\n';
- }
- details.text = result;
- break;
-
- case 'scripts':
- var result = '';
- for (i = 0; i < body.length; i++) {
- if (i != 0) result += '\n';
- if (body[i].id) {
- result += body[i].id;
- } else {
- result += '[no id]';
- }
- result += ', ';
- if (body[i].name) {
- result += body[i].name;
- } else {
- if (body[i].compilationType == Debug.ScriptCompilationType.Eval
- && body[i].evalFromScript
- ) {
- result += 'eval from ';
- var script_value = response.lookup(body[i].evalFromScript.ref);
- result += ' ' + script_value.field('name');
- result += ':' + (body[i].evalFromLocation.line + 1);
- result += ':' + body[i].evalFromLocation.column;
- } else if (body[i].compilationType ==
- Debug.ScriptCompilationType.JSON) {
- result += 'JSON ';
- } else { // body[i].compilation == Debug.ScriptCompilationType.Host
- result += '[unnamed] ';
- }
- }
- result += ' (lines: ';
- result += body[i].lineCount;
- result += ', length: ';
- result += body[i].sourceLength;
- if (body[i].type == Debug.ScriptType.Native) {
- result += ', native';
- } else if (body[i].type == Debug.ScriptType.Extension) {
- result += ', extension';
- }
- result += '), [';
- var sourceStart = body[i].sourceStart;
- if (sourceStart.length > 40) {
- sourceStart = sourceStart.substring(0, 37) + '...';
- }
- result += sourceStart;
- result += ']';
- }
- if (body.length == 0) {
- result = "no matching scripts found";
- }
- details.text = result;
- break;
-
- case 'threads':
- var result = 'Active V8 threads: ' + body.totalThreads + '\n';
- body.threads.sort(function(a, b) { return a.id - b.id; });
- for (i = 0; i < body.threads.length; i++) {
- result += body.threads[i].current ? '*' : ' ';
- result += ' ';
- result += body.threads[i].id;
- result += '\n';
- }
- details.text = result;
- break;
-
- case 'continue':
- details.text = "(running)";
- break;
-
- case 'v8flags':
- details.text = "flags set";
- break;
-
- case 'gc':
- details.text = "GC " + body.before + " => " + body.after;
- if (body.after > (1024*1024)) {
- details.text +=
- " (" + roundNumber(body.before/(1024*1024), 1) + "M => " +
- roundNumber(body.after/(1024*1024), 1) + "M)";
- } else if (body.after > 1024) {
- details.text +=
- " (" + roundNumber(body.before/1024, 1) + "K => " +
- roundNumber(body.after/1024, 1) + "K)";
- }
- break;
-
- default:
- details.text =
- 'Response for unknown command \'' + response.command() + '\'' +
- ' (' + response.raw_json() + ')';
- }
- } catch (e) {
- details.text = 'Error: "' + e + '" formatting response';
- }
-
- return details;
-}
-
-
-/**
- * Protocol packages send from the debugger.
- * @param {string} json - raw protocol packet as JSON string.
- * @constructor
- */
-function ProtocolPackage(json) {
- this.raw_json_ = json;
- this.packet_ = JSON.parse(json);
- this.refs_ = [];
- if (this.packet_.refs) {
- for (var i = 0; i < this.packet_.refs.length; i++) {
- this.refs_[this.packet_.refs[i].handle] = this.packet_.refs[i];
- }
- }
-}
-
-
-/**
- * Get the packet type.
- * @return {String} the packet type
- */
-ProtocolPackage.prototype.type = function() {
- return this.packet_.type;
-};
-
-
-/**
- * Get the packet event.
- * @return {Object} the packet event
- */
-ProtocolPackage.prototype.event = function() {
- return this.packet_.event;
-};
-
-
-/**
- * Get the packet request sequence.
- * @return {number} the packet request sequence
- */
-ProtocolPackage.prototype.requestSeq = function() {
- return this.packet_.request_seq;
-};
-
-
-/**
- * Get the packet request sequence.
- * @return {number} the packet request sequence
- */
-ProtocolPackage.prototype.running = function() {
- return this.packet_.running ? true : false;
-};
-
-
-ProtocolPackage.prototype.success = function() {
- return this.packet_.success ? true : false;
-};
-
-
-ProtocolPackage.prototype.message = function() {
- return this.packet_.message;
-};
-
-
-ProtocolPackage.prototype.command = function() {
- return this.packet_.command;
-};
-
-
-ProtocolPackage.prototype.body = function() {
- return this.packet_.body;
-};
-
-
-ProtocolPackage.prototype.bodyValue = function(index) {
- if (index != null) {
- return new ProtocolValue(this.packet_.body[index], this);
- } else {
- return new ProtocolValue(this.packet_.body, this);
- }
-};
-
-
-ProtocolPackage.prototype.body = function() {
- return this.packet_.body;
-};
-
-
-ProtocolPackage.prototype.lookup = function(handle) {
- var value = this.refs_[handle];
- if (value) {
- return new ProtocolValue(value, this);
- } else {
- return new ProtocolReference(handle);
- }
-};
-
-
-ProtocolPackage.prototype.raw_json = function() {
- return this.raw_json_;
-};
-
-
-function ProtocolValue(value, packet) {
- this.value_ = value;
- this.packet_ = packet;
-}
-
-
-/**
- * Get the value type.
- * @return {String} the value type
- */
-ProtocolValue.prototype.type = function() {
- return this.value_.type;
-};
-
-
-/**
- * Get a metadata field from a protocol value.
- * @return {Object} the metadata field value
- */
-ProtocolValue.prototype.field = function(name) {
- return this.value_[name];
-};
-
-
-/**
- * Check is the value is a primitive value.
- * @return {boolean} true if the value is primitive
- */
-ProtocolValue.prototype.isPrimitive = function() {
- return this.isUndefined() || this.isNull() || this.isBoolean() ||
- this.isNumber() || this.isString();
-};
-
-
-/**
- * Get the object handle.
- * @return {number} the value handle
- */
-ProtocolValue.prototype.handle = function() {
- return this.value_.handle;
-};
-
-
-/**
- * Check is the value is undefined.
- * @return {boolean} true if the value is undefined
- */
-ProtocolValue.prototype.isUndefined = function() {
- return this.value_.type == 'undefined';
-};
-
-
-/**
- * Check is the value is null.
- * @return {boolean} true if the value is null
- */
-ProtocolValue.prototype.isNull = function() {
- return this.value_.type == 'null';
-};
-
-
-/**
- * Check is the value is a boolean.
- * @return {boolean} true if the value is a boolean
- */
-ProtocolValue.prototype.isBoolean = function() {
- return this.value_.type == 'boolean';
-};
-
-
-/**
- * Check is the value is a number.
- * @return {boolean} true if the value is a number
- */
-ProtocolValue.prototype.isNumber = function() {
- return this.value_.type == 'number';
-};
-
-
-/**
- * Check is the value is a string.
- * @return {boolean} true if the value is a string
- */
-ProtocolValue.prototype.isString = function() {
- return this.value_.type == 'string';
-};
-
-
-/**
- * Check is the value is an object.
- * @return {boolean} true if the value is an object
- */
-ProtocolValue.prototype.isObject = function() {
- return this.value_.type == 'object' || this.value_.type == 'function' ||
- this.value_.type == 'error' || this.value_.type == 'regexp';
-};
-
-
-/**
- * Get the constructor function
- * @return {ProtocolValue} constructor function
- */
-ProtocolValue.prototype.constructorFunctionValue = function() {
- var ctor = this.value_.constructorFunction;
- return this.packet_.lookup(ctor.ref);
-};
-
-
-/**
- * Get the __proto__ value
- * @return {ProtocolValue} __proto__ value
- */
-ProtocolValue.prototype.protoObjectValue = function() {
- var proto = this.value_.protoObject;
- return this.packet_.lookup(proto.ref);
-};
-
-
-/**
- * Get the number og properties.
- * @return {number} the number of properties
- */
-ProtocolValue.prototype.propertyCount = function() {
- return this.value_.properties ? this.value_.properties.length : 0;
-};
-
-
-/**
- * Get the specified property name.
- * @return {string} property name
- */
-ProtocolValue.prototype.propertyName = function(index) {
- var property = this.value_.properties[index];
- return property.name;
-};
-
-
-/**
- * Return index for the property name.
- * @param name The property name to look for
- * @return {number} index for the property name
- */
-ProtocolValue.prototype.propertyIndex = function(name) {
- for (var i = 0; i < this.propertyCount(); i++) {
- if (this.value_.properties[i].name == name) {
- return i;
- }
- }
- return null;
-};
-
-
-/**
- * Get the specified property value.
- * @return {ProtocolValue} property value
- */
-ProtocolValue.prototype.propertyValue = function(index) {
- var property = this.value_.properties[index];
- return this.packet_.lookup(property.ref);
-};
-
-
-/**
- * Check is the value is a string.
- * @return {boolean} true if the value is a string
- */
-ProtocolValue.prototype.value = function() {
- return this.value_.value;
-};
-
-
-ProtocolValue.prototype.valueString = function() {
- return this.value_.text;
-};
-
-
-function ProtocolReference(handle) {
- this.handle_ = handle;
-}
-
-
-ProtocolReference.prototype.handle = function() {
- return this.handle_;
-};
-
-
// A more universal stringify that supports more types than JSON.
// Used by the d8 shell to output results.
var stringifyDepthLimit = 4; // To avoid crashing on cyclic objects
@@ -2017,6 +60,6 @@ function Stringify(x, depth) {
}
return "{" + props.join(", ") + "}";
default:
- return "[crazy non-standard shit]";
+ return "[crazy non-standard value]";
}
}
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index 118c8a69c9..3a74e102ee 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -17,15 +17,18 @@ var $createDate;
var GlobalDate = global.Date;
var InternalArray = utils.InternalArray;
-
var IsFinite;
var MathAbs;
var MathFloor;
+var ToNumber;
+var ToString;
utils.Import(function(from) {
IsFinite = from.IsFinite;
MathAbs = from.MathAbs;
MathFloor = from.MathFloor;
+ ToNumber = from.ToNumber;
+ ToString = from.ToString;
});
// -------------------------------------------------------------------
@@ -169,17 +172,17 @@ function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
// which is the default for everything else than Date objects.
// This makes us behave like KJS and SpiderMonkey.
var time = $toPrimitive(year, NUMBER_HINT);
- value = IS_STRING(time) ? DateParse(time) : $toNumber(time);
+ value = IS_STRING(time) ? DateParse(time) : ToNumber(time);
}
SET_UTC_DATE_VALUE(this, value);
} else {
- year = $toNumber(year);
- month = $toNumber(month);
- date = argc > 2 ? $toNumber(date) : 1;
- hours = argc > 3 ? $toNumber(hours) : 0;
- minutes = argc > 4 ? $toNumber(minutes) : 0;
- seconds = argc > 5 ? $toNumber(seconds) : 0;
- ms = argc > 6 ? $toNumber(ms) : 0;
+ year = ToNumber(year);
+ month = ToNumber(month);
+ date = argc > 2 ? ToNumber(date) : 1;
+ hours = argc > 3 ? ToNumber(hours) : 0;
+ minutes = argc > 4 ? ToNumber(minutes) : 0;
+ seconds = argc > 5 ? ToNumber(seconds) : 0;
+ ms = argc > 6 ? ToNumber(ms) : 0;
year = (!NUMBER_IS_NAN(year) &&
0 <= TO_INTEGER(year) &&
TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
@@ -267,7 +270,7 @@ var parse_buffer = new InternalArray(8);
// ECMA 262 - 15.9.4.2
function DateParse(string) {
- var arr = %DateParseString($toString(string), parse_buffer);
+ var arr = %DateParseString(ToString(string), parse_buffer);
if (IS_NULL(arr)) return NAN;
var day = MakeDay(arr[0], arr[1], arr[2]);
@@ -284,14 +287,14 @@ function DateParse(string) {
// ECMA 262 - 15.9.4.3
function DateUTC(year, month, date, hours, minutes, seconds, ms) {
- year = $toNumber(year);
- month = $toNumber(month);
+ year = ToNumber(year);
+ month = ToNumber(month);
var argc = %_ArgumentsLength();
- date = argc > 2 ? $toNumber(date) : 1;
- hours = argc > 3 ? $toNumber(hours) : 0;
- minutes = argc > 4 ? $toNumber(minutes) : 0;
- seconds = argc > 5 ? $toNumber(seconds) : 0;
- ms = argc > 6 ? $toNumber(ms) : 0;
+ date = argc > 2 ? ToNumber(date) : 1;
+ hours = argc > 3 ? ToNumber(hours) : 0;
+ minutes = argc > 4 ? ToNumber(minutes) : 0;
+ seconds = argc > 5 ? ToNumber(seconds) : 0;
+ ms = argc > 6 ? ToNumber(ms) : 0;
year = (!NUMBER_IS_NAN(year) &&
0 <= TO_INTEGER(year) &&
TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
@@ -497,7 +500,7 @@ function DateGetTimezoneOffset() {
// ECMA 262 - 15.9.5.27
function DateSetTime(ms) {
CHECK_DATE(this);
- SET_UTC_DATE_VALUE(this, $toNumber(ms));
+ SET_UTC_DATE_VALUE(this, ToNumber(ms));
return UTC_DATE_VALUE(this);
}
@@ -506,7 +509,7 @@ function DateSetTime(ms) {
function DateSetMilliseconds(ms) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- ms = $toNumber(ms);
+ ms = ToNumber(ms);
var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), LOCAL_SEC(this), ms);
return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
}
@@ -516,7 +519,7 @@ function DateSetMilliseconds(ms) {
function DateSetUTCMilliseconds(ms) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- ms = $toNumber(ms);
+ ms = ToNumber(ms);
var time = MakeTime(UTC_HOUR(this),
UTC_MIN(this),
UTC_SEC(this),
@@ -529,8 +532,8 @@ function DateSetUTCMilliseconds(ms) {
function DateSetSeconds(sec, ms) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- sec = $toNumber(sec);
- ms = %_ArgumentsLength() < 2 ? LOCAL_MS(this) : $toNumber(ms);
+ sec = ToNumber(sec);
+ ms = %_ArgumentsLength() < 2 ? LOCAL_MS(this) : ToNumber(ms);
var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), sec, ms);
return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
}
@@ -540,8 +543,8 @@ function DateSetSeconds(sec, ms) {
function DateSetUTCSeconds(sec, ms) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- sec = $toNumber(sec);
- ms = %_ArgumentsLength() < 2 ? UTC_MS(this) : $toNumber(ms);
+ sec = ToNumber(sec);
+ ms = %_ArgumentsLength() < 2 ? UTC_MS(this) : ToNumber(ms);
var time = MakeTime(UTC_HOUR(this), UTC_MIN(this), sec, ms);
return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
}
@@ -551,10 +554,10 @@ function DateSetUTCSeconds(sec, ms) {
function DateSetMinutes(min, sec, ms) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- min = $toNumber(min);
+ min = ToNumber(min);
var argc = %_ArgumentsLength();
- sec = argc < 2 ? LOCAL_SEC(this) : $toNumber(sec);
- ms = argc < 3 ? LOCAL_MS(this) : $toNumber(ms);
+ sec = argc < 2 ? LOCAL_SEC(this) : ToNumber(sec);
+ ms = argc < 3 ? LOCAL_MS(this) : ToNumber(ms);
var time = MakeTime(LOCAL_HOUR(this), min, sec, ms);
return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
}
@@ -564,10 +567,10 @@ function DateSetMinutes(min, sec, ms) {
function DateSetUTCMinutes(min, sec, ms) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- min = $toNumber(min);
+ min = ToNumber(min);
var argc = %_ArgumentsLength();
- sec = argc < 2 ? UTC_SEC(this) : $toNumber(sec);
- ms = argc < 3 ? UTC_MS(this) : $toNumber(ms);
+ sec = argc < 2 ? UTC_SEC(this) : ToNumber(sec);
+ ms = argc < 3 ? UTC_MS(this) : ToNumber(ms);
var time = MakeTime(UTC_HOUR(this), min, sec, ms);
return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
}
@@ -577,11 +580,11 @@ function DateSetUTCMinutes(min, sec, ms) {
function DateSetHours(hour, min, sec, ms) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- hour = $toNumber(hour);
+ hour = ToNumber(hour);
var argc = %_ArgumentsLength();
- min = argc < 2 ? LOCAL_MIN(this) : $toNumber(min);
- sec = argc < 3 ? LOCAL_SEC(this) : $toNumber(sec);
- ms = argc < 4 ? LOCAL_MS(this) : $toNumber(ms);
+ min = argc < 2 ? LOCAL_MIN(this) : ToNumber(min);
+ sec = argc < 3 ? LOCAL_SEC(this) : ToNumber(sec);
+ ms = argc < 4 ? LOCAL_MS(this) : ToNumber(ms);
var time = MakeTime(hour, min, sec, ms);
return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
}
@@ -591,11 +594,11 @@ function DateSetHours(hour, min, sec, ms) {
function DateSetUTCHours(hour, min, sec, ms) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- hour = $toNumber(hour);
+ hour = ToNumber(hour);
var argc = %_ArgumentsLength();
- min = argc < 2 ? UTC_MIN(this) : $toNumber(min);
- sec = argc < 3 ? UTC_SEC(this) : $toNumber(sec);
- ms = argc < 4 ? UTC_MS(this) : $toNumber(ms);
+ min = argc < 2 ? UTC_MIN(this) : ToNumber(min);
+ sec = argc < 3 ? UTC_SEC(this) : ToNumber(sec);
+ ms = argc < 4 ? UTC_MS(this) : ToNumber(ms);
var time = MakeTime(hour, min, sec, ms);
return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
}
@@ -605,7 +608,7 @@ function DateSetUTCHours(hour, min, sec, ms) {
function DateSetDate(date) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- date = $toNumber(date);
+ date = ToNumber(date);
var day = MakeDay(LOCAL_YEAR(this), LOCAL_MONTH(this), date);
return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
}
@@ -615,7 +618,7 @@ function DateSetDate(date) {
function DateSetUTCDate(date) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- date = $toNumber(date);
+ date = ToNumber(date);
var day = MakeDay(UTC_YEAR(this), UTC_MONTH(this), date);
return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
}
@@ -625,8 +628,8 @@ function DateSetUTCDate(date) {
function DateSetMonth(month, date) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- month = $toNumber(month);
- date = %_ArgumentsLength() < 2 ? LOCAL_DAY(this) : $toNumber(date);
+ month = ToNumber(month);
+ date = %_ArgumentsLength() < 2 ? LOCAL_DAY(this) : ToNumber(date);
var day = MakeDay(LOCAL_YEAR(this), month, date);
return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
}
@@ -636,8 +639,8 @@ function DateSetMonth(month, date) {
function DateSetUTCMonth(month, date) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- month = $toNumber(month);
- date = %_ArgumentsLength() < 2 ? UTC_DAY(this) : $toNumber(date);
+ month = ToNumber(month);
+ date = %_ArgumentsLength() < 2 ? UTC_DAY(this) : ToNumber(date);
var day = MakeDay(UTC_YEAR(this), month, date);
return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
}
@@ -647,16 +650,16 @@ function DateSetUTCMonth(month, date) {
function DateSetFullYear(year, month, date) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
- year = $toNumber(year);
+ year = ToNumber(year);
var argc = %_ArgumentsLength();
var time ;
if (NUMBER_IS_NAN(t)) {
- month = argc < 2 ? 0 : $toNumber(month);
- date = argc < 3 ? 1 : $toNumber(date);
+ month = argc < 2 ? 0 : ToNumber(month);
+ date = argc < 3 ? 1 : ToNumber(date);
time = 0;
} else {
- month = argc < 2 ? LOCAL_MONTH(this) : $toNumber(month);
- date = argc < 3 ? LOCAL_DAY(this) : $toNumber(date);
+ month = argc < 2 ? LOCAL_MONTH(this) : ToNumber(month);
+ date = argc < 3 ? LOCAL_DAY(this) : ToNumber(date);
time = LOCAL_TIME_IN_DAY(this);
}
var day = MakeDay(year, month, date);
@@ -668,16 +671,16 @@ function DateSetFullYear(year, month, date) {
function DateSetUTCFullYear(year, month, date) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
- year = $toNumber(year);
+ year = ToNumber(year);
var argc = %_ArgumentsLength();
var time ;
if (NUMBER_IS_NAN(t)) {
- month = argc < 2 ? 0 : $toNumber(month);
- date = argc < 3 ? 1 : $toNumber(date);
+ month = argc < 2 ? 0 : ToNumber(month);
+ date = argc < 3 ? 1 : ToNumber(date);
time = 0;
} else {
- month = argc < 2 ? UTC_MONTH(this) : $toNumber(month);
- date = argc < 3 ? UTC_DAY(this) : $toNumber(date);
+ month = argc < 2 ? UTC_MONTH(this) : ToNumber(month);
+ date = argc < 3 ? UTC_DAY(this) : ToNumber(date);
time = UTC_TIME_IN_DAY(this);
}
var day = MakeDay(year, month, date);
@@ -709,7 +712,7 @@ function DateGetYear() {
// ECMA 262 - B.2.5
function DateSetYear(year) {
CHECK_DATE(this);
- year = $toNumber(year);
+ year = ToNumber(year);
if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, NAN);
year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
@@ -775,7 +778,7 @@ function DateToISOString() {
function DateToJSON(key) {
- var o = $toObject(this);
+ var o = TO_OBJECT(this);
var tv = $defaultNumber(o);
if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
return null;
@@ -880,7 +883,8 @@ utils.InstallFunctions(GlobalDate.prototype, DONT_ENUM, [
"toJSON", DateToJSON
]);
-// Expose to the global scope.
-$createDate = CreateDate;
+utils.ExportToRuntime(function(to) {
+ to.CreateDate = CreateDate;
+});
})
diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h
index f7360f8c02..8973aa0d4f 100644
--- a/deps/v8/src/dateparser-inl.h
+++ b/deps/v8/src/dateparser-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_DATEPARSER_INL_H_
#define V8_DATEPARSER_INL_H_
+#include "src/char-predicates-inl.h"
#include "src/dateparser.h"
namespace v8 {
@@ -22,9 +23,9 @@ bool DateParser::Parse(Vector<Char> str,
DayComposer day;
// Specification:
- // Accept ES5 ISO 8601 date-time-strings or legacy dates compatible
+ // Accept ES6 ISO 8601 date-time-strings or legacy dates compatible
// with Safari.
- // ES5 ISO 8601 dates:
+ // ES6 ISO 8601 dates:
// [('-'|'+')yy]yyyy[-MM[-DD]][THH:mm[:ss[.sss]][Z|(+|-)hh:mm]]
// where yyyy is in the range 0000..9999 and
// +/-yyyyyy is in the range -999999..+999999 -
@@ -39,8 +40,7 @@ bool DateParser::Parse(Vector<Char> str,
// sss is in the range 000..999,
// hh is in the range 00..23,
// mm, ss, and sss default to 00 if missing, and
- // timezone defaults to Z if missing
- // (following Safari, ISO actually demands local time).
+ // timezone defaults to local time if missing.
// Extensions:
// We also allow sss to have more or less than three digits (but at
// least one).
@@ -62,15 +62,13 @@ bool DateParser::Parse(Vector<Char> str,
// is allowed).
// Intersection of the two:
// A string that matches both formats (e.g. 1970-01-01) will be
- // parsed as an ES5 date-time string - which means it will default
- // to UTC time-zone. That's unavoidable if following the ES5
- // specification.
- // After a valid "T" has been read while scanning an ES5 datetime string,
+ // parsed as an ES6 date-time string.
+ // After a valid "T" has been read while scanning an ES6 datetime string,
// the input can no longer be a valid legacy date, since the "T" is a
// garbage string after a number has been read.
- // First try getting as far as possible with as ES5 Date Time String.
- DateToken next_unhandled_token = ParseES5DateTime(&scanner, &day, &time, &tz);
+ // First try getting as far as possible with as ES6 Date Time String.
+ DateToken next_unhandled_token = ParseES6DateTime(&scanner, &day, &time, &tz);
if (next_unhandled_token.IsInvalid()) return false;
bool has_read_number = !day.IsEmpty();
// If there's anything left, continue with the legacy parser.
@@ -195,7 +193,7 @@ DateParser::DateToken DateParser::DateStringTokenizer<CharType>::Scan() {
template <typename Char>
-DateParser::DateToken DateParser::ParseES5DateTime(
+DateParser::DateToken DateParser::ParseES6DateTime(
DateStringTokenizer<Char>* scanner,
DayComposer* day,
TimeComposer* time,
@@ -233,7 +231,7 @@ DateParser::DateToken DateParser::ParseES5DateTime(
if (!scanner->Peek().IsKeywordType(TIME_SEPARATOR)) {
if (!scanner->Peek().IsEndOfInput()) return scanner->Next();
} else {
- // ES5 Date Time String time part is present.
+ // ES6 Date Time String time part is present.
scanner->Next();
if (!scanner->Peek().IsFixedLengthNumber(2) ||
!Between(scanner->Peek().number(), 0, 24)) {
@@ -299,8 +297,7 @@ DateParser::DateToken DateParser::ParseES5DateTime(
}
if (!scanner->Peek().IsEndOfInput()) return DateToken::Invalid();
}
- // Successfully parsed ES5 Date Time String. Default to UTC if no TZ given.
- if (tz->IsEmpty()) tz->Set(0);
+ // Successfully parsed ES6 Date Time String.
day->set_iso_date();
return DateToken::EndOfInput();
}
diff --git a/deps/v8/src/dateparser.cc b/deps/v8/src/dateparser.cc
index 0e5cc8c3ef..09dbf1127d 100644
--- a/deps/v8/src/dateparser.cc
+++ b/deps/v8/src/dateparser.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/dateparser.h"
+#include "src/char-predicates-inl.h"
+#include "src/objects-inl.h"
+
namespace v8 {
namespace internal {
@@ -80,7 +81,12 @@ bool DateParser::TimeComposer::Write(FixedArray* output) {
}
if (!IsHour(hour) || !IsMinute(minute) ||
- !IsSecond(second) || !IsMillisecond(millisecond)) return false;
+ !IsSecond(second) || !IsMillisecond(millisecond)) {
+ // A 24th hour is allowed if minutes, seconds, and milliseconds are 0
+ if (hour != 24 || minute != 0 || second != 0 || millisecond != 0) {
+ return false;
+ }
+ }
output->set(HOUR, Smi::FromInt(hour));
output->set(MINUTE, Smi::FromInt(minute));
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index f284590264..a9db8685d9 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -6,7 +6,8 @@
#define V8_DATEPARSER_H_
#include "src/allocation.h"
-#include "src/char-predicates-inl.h"
+#include "src/char-predicates.h"
+#include "src/scanner.h"
namespace v8 {
namespace internal {
@@ -367,13 +368,13 @@ class DateParser : public AllStatic {
bool is_iso_date_;
};
- // Tries to parse an ES5 Date Time String. Returns the next token
+ // Tries to parse an ES6 Date Time String. Returns the next token
// to continue with in the legacy date string parser. If parsing is
// complete, returns DateToken::EndOfInput(). If terminally unsuccessful,
// returns DateToken::Invalid(). Otherwise parsing continues in the
// legacy parser.
template <typename Char>
- static DateParser::DateToken ParseES5DateTime(
+ static DateParser::DateToken ParseES6DateTime(
DateStringTokenizer<Char>* scanner,
DayComposer* day,
TimeComposer* time,
diff --git a/deps/v8/src/debug/OWNERS b/deps/v8/src/debug/OWNERS
new file mode 100644
index 0000000000..cf18bd8a3b
--- /dev/null
+++ b/deps/v8/src/debug/OWNERS
@@ -0,0 +1,7 @@
+set noparent
+
+bmeurer@chromium.org
+mvstanton@chromium.org
+ulan@chromium.org
+verwaest@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/src/debug/arm/OWNERS b/deps/v8/src/debug/arm/OWNERS
new file mode 100644
index 0000000000..906a5ce641
--- /dev/null
+++ b/deps/v8/src/debug/arm/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/debug/arm/debug-arm.cc b/deps/v8/src/debug/arm/debug-arm.cc
new file mode 100644
index 0000000000..7f1542e183
--- /dev/null
+++ b/deps/v8/src/debug/arm/debug-arm.cc
@@ -0,0 +1,159 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void EmitDebugBreakSlot(MacroAssembler* masm) {
+ Label check_size;
+ __ bind(&check_size);
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+ }
+ DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
+ masm->InstructionsGeneratedSince(&check_size));
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
+ int call_argc) {
+ // Generate enough nop's to make space for a call instruction. Avoid emitting
+ // the constant pool in the debug break slot code.
+ Assembler::BlockConstPoolScope block_const_pool(masm);
+ masm->RecordDebugBreakSlot(mode, call_argc);
+ EmitDebugBreakSlot(masm);
+}
+
+
+void DebugCodegen::ClearDebugBreakSlot(Address pc) {
+ CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ EmitDebugBreakSlot(patcher.masm());
+}
+
+
+void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+ DCHECK_EQ(Code::BUILTIN, code->kind());
+ CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ // Patch the code changing the debug break slot code from
+ // mov r2, r2
+ // mov r2, r2
+ // mov r2, r2
+ // mov r2, r2
+ // to a call to the debug break slot code.
+ // ldr ip, [pc, #0]
+ // b skip
+ // <debug break slot code entry point address>
+ // skip:
+ // blx ip
+ Label skip_constant;
+ patcher.masm()->ldr(ip, MemOperand(v8::internal::pc, 0));
+ patcher.masm()->b(&skip_constant);
+ patcher.Emit(code->entry());
+ patcher.masm()->bind(&skip_constant);
+ patcher.masm()->blx(ip);
+}
+
+
+void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
+ DebugBreakCallHelperMode mode) {
+ __ RecordComment("Debug break");
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
+ for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+ __ push(ip);
+ }
+ __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
+ __ push(ip);
+
+ if (mode == SAVE_RESULT_REGISTER) __ push(r0);
+
+ __ mov(r0, Operand::Zero()); // no arguments
+ __ mov(r1,
+ Operand(ExternalReference(
+ Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
+
+ CEntryStub ceb(masm->isolate(), 1);
+ __ CallStub(&ceb);
+
+ if (FLAG_debug_code) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ Register reg = {JSCallerSavedCode(i)};
+ __ mov(reg, Operand(kDebugZapValue));
+ }
+ }
+
+ if (mode == SAVE_RESULT_REGISTER) __ pop(r0);
+
+ // Don't bother removing padding bytes pushed on the stack
+ // as the frame is going to be restored right away.
+
+ // Leave the internal frame.
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ mov(ip, Operand(after_break_target));
+ __ ldr(ip, MemOperand(ip));
+ __ Jump(ip);
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ __ Ret();
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ __ mov(ip, Operand(restarter_frame_function_slot));
+ __ mov(r1, Operand::Zero());
+ __ str(r1, MemOperand(ip, 0));
+
+ // Load the function pointer off of our current stack frame.
+ __ ldr(r1, MemOperand(fp,
+ StandardFrameConstants::kConstantPoolOffset - kPointerSize));
+
+ // Pop return address, frame and constant pool pointer (if
+ // FLAG_enable_embedded_constant_pool).
+ __ LeaveFrame(StackFrame::INTERNAL);
+
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ // Load context from the function.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ ldr(ip, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+ __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Re-run JSFunction, r1 is function, cp is context.
+ __ Jump(ip);
+ }
+}
+
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/debug/arm64/OWNERS b/deps/v8/src/debug/arm64/OWNERS
new file mode 100644
index 0000000000..906a5ce641
--- /dev/null
+++ b/deps/v8/src/debug/arm64/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
new file mode 100644
index 0000000000..7272fe7bcf
--- /dev/null
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -0,0 +1,166 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/arm64/frames-arm64.h"
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void EmitDebugBreakSlot(Assembler* masm) {
+ Label check_size;
+ __ bind(&check_size);
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(Assembler::DEBUG_BREAK_NOP);
+ }
+ DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
+ static_cast<int>(masm->InstructionsGeneratedSince(&check_size)));
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
+ int call_argc) {
+ // Generate enough nop's to make space for a call instruction. Avoid emitting
+ // the constant pool in the debug break slot code.
+ InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
+ masm->RecordDebugBreakSlot(mode, call_argc);
+ EmitDebugBreakSlot(masm);
+}
+
+
+void DebugCodegen::ClearDebugBreakSlot(Address pc) {
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc),
+ Assembler::kDebugBreakSlotInstructions);
+ EmitDebugBreakSlot(&patcher);
+}
+
+
+void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+ DCHECK_EQ(Code::BUILTIN, code->kind());
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc),
+ Assembler::kDebugBreakSlotInstructions);
+ // Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
+ // break slot code from
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // mov x0, x0 @ nop DEBUG_BREAK_NOP
+ // to a call to the debug slot code.
+ // ldr ip0, [pc, #(2 * kInstructionSize)]
+ // blr ip0
+ // b skip
+ // <debug break slot code entry point address (64 bits)>
+ // skip:
+
+ Label skip_constant;
+ // The first instruction of a patched debug break slot must be a load literal
+ // loading the address of the debug break slot code.
+ patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
+ patcher.b(&skip_constant);
+ patcher.dc64(reinterpret_cast<int64_t>(code->entry()));
+ patcher.bind(&skip_constant);
+ // TODO(all): check the following is correct.
+ // The debug break slot code will push a frame and call statically compiled
+ // code. By using blr, this call site will be registered in the frame.
+ // The debugger can now iterate on the frames to find this call.
+ patcher.blr(ip0);
+}
+
+
+void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
+ DebugBreakCallHelperMode mode) {
+ __ RecordComment("Debug break");
+ Register scratch = x10;
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingValue));
+ __ PushMultipleTimes(scratch, LiveEdit::kFramePaddingInitialSize);
+ __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
+ __ Push(scratch);
+
+ if (mode == SAVE_RESULT_REGISTER) __ Push(x0);
+
+ __ Mov(x0, 0); // No arguments.
+ __ Mov(x1, ExternalReference(Runtime::FunctionForId(Runtime::kDebugBreak),
+ masm->isolate()));
+
+ CEntryStub stub(masm->isolate(), 1);
+ __ CallStub(&stub);
+
+ if (FLAG_debug_code) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ Register reg = Register::XRegFromCode(JSCallerSavedCode(i));
+ __ Mov(reg, Operand(kDebugZapValue));
+ }
+ }
+
+ // Restore the register values from the expression stack.
+ if (mode == SAVE_RESULT_REGISTER) __ Pop(x0);
+
+ // Don't bother removing padding bytes pushed on the stack
+ // as the frame is going to be restored right away.
+
+ // Leave the internal frame.
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ Mov(scratch, after_break_target);
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Br(scratch);
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ __ Ret();
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+
+ __ Mov(scratch, restarter_frame_function_slot);
+ __ Str(xzr, MemOperand(scratch));
+
+ // We do not know our frame height, but set sp based on fp.
+ __ Sub(masm->StackPointer(), fp, kPointerSize);
+ __ AssertStackConsistency();
+
+ __ Pop(x1, fp, lr); // Function, Frame, Return address.
+
+ // Load context from the function.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ Ldr(scratch, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(scratch, FieldMemOperand(scratch, SharedFunctionInfo::kCodeOffset));
+ __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
+
+ // Re-run JSFunction, x1 is function, cp is context.
+ __ Br(scratch);
+}
+
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
new file mode 100644
index 0000000000..323da7312c
--- /dev/null
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -0,0 +1,314 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/debug-evaluate.h"
+
+#include "src/accessors.h"
+#include "src/contexts.h"
+#include "src/debug/debug.h"
+#include "src/debug/debug-frames.h"
+#include "src/debug/debug-scopes.h"
+#include "src/frames-inl.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+static inline bool IsDebugContext(Isolate* isolate, Context* context) {
+ return context->native_context() == *isolate->debug()->debug_context();
+}
+
+
+MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
+ Handle<String> source,
+ bool disable_break,
+ Handle<Object> context_extension) {
+ // Handle the processing of break.
+ DisableBreak disable_break_scope(isolate->debug(), disable_break);
+
+ // Enter the top context from before the debugger was invoked.
+ SaveContext save(isolate);
+ SaveContext* top = &save;
+ while (top != NULL && IsDebugContext(isolate, *top->context())) {
+ top = top->prev();
+ }
+ if (top != NULL) isolate->set_context(*top->context());
+
+ // Get the native context now set to the top context from before the
+ // debugger was invoked.
+ Handle<Context> context = isolate->native_context();
+ Handle<JSObject> receiver(context->global_proxy());
+ Handle<SharedFunctionInfo> outer_info(context->closure()->shared(), isolate);
+ return Evaluate(isolate, outer_info, context, context_extension, receiver,
+ source);
+}
+
+
+MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
+ StackFrame::Id frame_id,
+ int inlined_jsframe_index,
+ Handle<String> source,
+ bool disable_break,
+ Handle<Object> context_extension) {
+ // Handle the processing of break.
+ DisableBreak disable_break_scope(isolate->debug(), disable_break);
+
+ // Get the frame where the debugging is performed.
+ JavaScriptFrameIterator it(isolate, frame_id);
+ JavaScriptFrame* frame = it.frame();
+
+ // Traverse the saved contexts chain to find the active context for the
+ // selected frame.
+ SaveContext* save =
+ DebugFrameHelper::FindSavedContextForFrame(isolate, frame);
+ SaveContext savex(isolate);
+ isolate->set_context(*(save->context()));
+
+ // Materialize stack locals and the arguments object.
+ ContextBuilder context_builder(isolate, frame, inlined_jsframe_index);
+ if (isolate->has_pending_exception()) return MaybeHandle<Object>();
+
+ Handle<Object> receiver(frame->receiver(), isolate);
+ MaybeHandle<Object> maybe_result = Evaluate(
+ isolate, context_builder.outer_info(),
+ context_builder.innermost_context(), context_extension, receiver, source);
+ if (!maybe_result.is_null()) context_builder.UpdateValues();
+ return maybe_result;
+}
+
+
+// Compile and evaluate source for the given context.
+MaybeHandle<Object> DebugEvaluate::Evaluate(
+ Isolate* isolate, Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> context, Handle<Object> context_extension,
+ Handle<Object> receiver, Handle<String> source) {
+ if (context_extension->IsJSObject()) {
+ Handle<JSObject> extension = Handle<JSObject>::cast(context_extension);
+ Handle<JSFunction> closure(context->closure(), isolate);
+ context = isolate->factory()->NewWithContext(closure, context, extension);
+ }
+
+ Handle<JSFunction> eval_fun;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, eval_fun,
+ Compiler::GetFunctionFromEval(
+ source, outer_info, context, SLOPPY,
+ NO_PARSE_RESTRICTION, RelocInfo::kNoPosition),
+ Object);
+
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, Execution::Call(isolate, eval_fun, receiver, 0, NULL),
+ Object);
+
+ // Skip the global proxy as it has no properties and always delegates to the
+ // real global object.
+ if (result->IsJSGlobalProxy()) {
+ PrototypeIterator iter(isolate, result);
+ // TODO(verwaest): This will crash when the global proxy is detached.
+ result = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ }
+
+ return result;
+}
+
+
+DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
+ JavaScriptFrame* frame,
+ int inlined_jsframe_index)
+ : isolate_(isolate),
+ frame_(frame),
+ inlined_jsframe_index_(inlined_jsframe_index) {
+ FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
+ Handle<JSFunction> function =
+ handle(JSFunction::cast(frame_inspector.GetFunction()));
+ Handle<Context> outer_context = handle(function->context(), isolate);
+ outer_info_ = handle(function->shared());
+ Handle<Context> inner_context;
+
+ bool stop = false;
+ for (ScopeIterator it(isolate, &frame_inspector);
+ !it.Failed() && !it.Done() && !stop; it.Next()) {
+ ScopeIterator::ScopeType scope_type = it.Type();
+
+ if (scope_type == ScopeIterator::ScopeTypeLocal) {
+ Handle<Context> parent_context =
+ it.HasContext() ? it.CurrentContext() : outer_context;
+
+ // The "this" binding, if any, can't be bound via "with". If we need
+ // to, add another node onto the outer context to bind "this".
+ parent_context = MaterializeReceiver(parent_context, function);
+
+ Handle<JSObject> materialized_function = NewJSObjectWithNullProto();
+
+ frame_inspector.MaterializeStackLocals(materialized_function, function);
+
+ MaterializeArgumentsObject(materialized_function, function);
+
+ Handle<Context> with_context = isolate->factory()->NewWithContext(
+ function, parent_context, materialized_function);
+
+ ContextChainElement context_chain_element;
+ context_chain_element.original_context = it.CurrentContext();
+ context_chain_element.materialized_object = materialized_function;
+ context_chain_element.scope_info = it.CurrentScopeInfo();
+ context_chain_.Add(context_chain_element);
+
+ stop = true;
+ RecordContextsInChain(&inner_context, with_context, with_context);
+ } else if (scope_type == ScopeIterator::ScopeTypeCatch ||
+ scope_type == ScopeIterator::ScopeTypeWith) {
+ Handle<Context> cloned_context = Handle<Context>::cast(
+ isolate->factory()->CopyFixedArray(it.CurrentContext()));
+
+ ContextChainElement context_chain_element;
+ context_chain_element.original_context = it.CurrentContext();
+ context_chain_element.cloned_context = cloned_context;
+ context_chain_.Add(context_chain_element);
+
+ RecordContextsInChain(&inner_context, cloned_context, cloned_context);
+ } else if (scope_type == ScopeIterator::ScopeTypeBlock) {
+ Handle<JSObject> materialized_object = NewJSObjectWithNullProto();
+ frame_inspector.MaterializeStackLocals(materialized_object,
+ it.CurrentScopeInfo());
+ if (it.HasContext()) {
+ Handle<Context> cloned_context = Handle<Context>::cast(
+ isolate->factory()->CopyFixedArray(it.CurrentContext()));
+ Handle<Context> with_context = isolate->factory()->NewWithContext(
+ function, cloned_context, materialized_object);
+
+ ContextChainElement context_chain_element;
+ context_chain_element.original_context = it.CurrentContext();
+ context_chain_element.cloned_context = cloned_context;
+ context_chain_element.materialized_object = materialized_object;
+ context_chain_element.scope_info = it.CurrentScopeInfo();
+ context_chain_.Add(context_chain_element);
+
+ RecordContextsInChain(&inner_context, cloned_context, with_context);
+ } else {
+ Handle<Context> with_context = isolate->factory()->NewWithContext(
+ function, outer_context, materialized_object);
+
+ ContextChainElement context_chain_element;
+ context_chain_element.materialized_object = materialized_object;
+ context_chain_element.scope_info = it.CurrentScopeInfo();
+ context_chain_.Add(context_chain_element);
+
+ RecordContextsInChain(&inner_context, with_context, with_context);
+ }
+ } else {
+ stop = true;
+ }
+ }
+ if (innermost_context_.is_null()) {
+ innermost_context_ = outer_context;
+ }
+ DCHECK(!innermost_context_.is_null());
+}
+
+
+void DebugEvaluate::ContextBuilder::UpdateValues() {
+ for (int i = 0; i < context_chain_.length(); i++) {
+ ContextChainElement element = context_chain_[i];
+ if (!element.original_context.is_null() &&
+ !element.cloned_context.is_null()) {
+ Handle<Context> cloned_context = element.cloned_context;
+ cloned_context->CopyTo(
+ Context::MIN_CONTEXT_SLOTS, *element.original_context,
+ Context::MIN_CONTEXT_SLOTS,
+ cloned_context->length() - Context::MIN_CONTEXT_SLOTS);
+ }
+ if (!element.materialized_object.is_null()) {
+ // Write back potential changes to materialized stack locals to the
+ // stack.
+ FrameInspector(frame_, inlined_jsframe_index_, isolate_)
+ .UpdateStackLocalsFromMaterializedObject(element.materialized_object,
+ element.scope_info);
+ }
+ }
+}
+
+
+Handle<JSObject> DebugEvaluate::ContextBuilder::NewJSObjectWithNullProto() {
+ Handle<JSObject> result =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
+ Handle<Map> new_map =
+ Map::Copy(Handle<Map>(result->map()), "ObjectWithNullProto");
+ Map::SetPrototype(new_map, isolate_->factory()->null_value());
+ JSObject::MigrateToMap(result, new_map);
+ return result;
+}
+
+
+void DebugEvaluate::ContextBuilder::RecordContextsInChain(
+ Handle<Context>* inner_context, Handle<Context> first,
+ Handle<Context> last) {
+ if (!inner_context->is_null()) {
+ (*inner_context)->set_previous(*last);
+ } else {
+ innermost_context_ = last;
+ }
+ *inner_context = first;
+}
+
+
+void DebugEvaluate::ContextBuilder::MaterializeArgumentsObject(
+ Handle<JSObject> target, Handle<JSFunction> function) {
+ // Do not materialize the arguments object for eval or top-level code.
+ // Skip if "arguments" is already taken.
+ if (!function->shared()->is_function()) return;
+ Maybe<bool> maybe = JSReceiver::HasOwnProperty(
+ target, isolate_->factory()->arguments_string());
+ DCHECK(maybe.IsJust());
+ if (maybe.FromJust()) return;
+
+ // FunctionGetArguments can't throw an exception.
+ Handle<JSObject> arguments =
+ Handle<JSObject>::cast(Accessors::FunctionGetArguments(function));
+ Handle<String> arguments_str = isolate_->factory()->arguments_string();
+ JSObject::SetOwnPropertyIgnoreAttributes(target, arguments_str, arguments,
+ NONE)
+ .Check();
+}
+
+
+Handle<Context> DebugEvaluate::ContextBuilder::MaterializeReceiver(
+ Handle<Context> target, Handle<JSFunction> function) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+ Handle<Object> receiver;
+ switch (scope_info->scope_type()) {
+ case FUNCTION_SCOPE: {
+ VariableMode mode;
+ VariableLocation location;
+ InitializationFlag init_flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+
+ // Don't bother creating a fake context node if "this" is in the context
+ // already.
+ if (ScopeInfo::ContextSlotIndex(
+ scope_info, isolate_->factory()->this_string(), &mode, &location,
+ &init_flag, &maybe_assigned_flag) >= 0) {
+ return target;
+ }
+ receiver = handle(frame_->receiver(), isolate_);
+ break;
+ }
+ case MODULE_SCOPE:
+ receiver = isolate_->factory()->undefined_value();
+ break;
+ case SCRIPT_SCOPE:
+ receiver = handle(function->global_proxy(), isolate_);
+ break;
+ default:
+ // For eval code, arrow functions, and the like, there's no "this" binding
+ // to materialize.
+ return target;
+ }
+
+ return isolate_->factory()->NewCatchContext(
+ function, target, isolate_->factory()->this_string(), receiver);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
new file mode 100644
index 0000000000..49a7fce3ee
--- /dev/null
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -0,0 +1,100 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_DEBUG_EVALUATE_H_
+#define V8_DEBUG_DEBUG_EVALUATE_H_
+
+#include "src/frames.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class DebugEvaluate : public AllStatic {
+ public:
+ static MaybeHandle<Object> Global(Isolate* isolate, Handle<String> source,
+ bool disable_break,
+ Handle<Object> context_extension);
+
+ // Evaluate a piece of JavaScript in the context of a stack frame for
+ // debugging. Things that need special attention are:
+ // - Parameters and stack-allocated locals need to be materialized. Altered
+ // values need to be written back to the stack afterwards.
+ // - The arguments object needs to be materialized.
+ static MaybeHandle<Object> Local(Isolate* isolate, StackFrame::Id frame_id,
+ int inlined_jsframe_index,
+ Handle<String> source, bool disable_break,
+ Handle<Object> context_extension);
+
+ private:
+ // This class builds a context chain for evaluation of expressions
+ // in debugger.
+ // The scope chain leading up to a breakpoint where evaluation occurs
+ // looks like:
+ // - [a mix of with, catch and block scopes]
+ // - [function stack + context]
+ // - [outer context]
+ // The builder materializes all stack variables into properties of objects;
+ // the expression is then evaluated as if it is inside a series of 'with'
+ // statements using those objects. To this end, the builder builds a new
+ // context chain, based on a scope chain:
+ // - every With and Catch scope begets a cloned context
+ // - Block scope begets one or two contexts:
+ // - if a block has context-allocated variables, its context is cloned
+ // - stack locals are materialized as a With context
+ // - Local scope begets a With context for materialized locals, chained to
+ // original function context. Original function context is the end of
+ // the chain.
+ class ContextBuilder {
+ public:
+ ContextBuilder(Isolate* isolate, JavaScriptFrame* frame,
+ int inlined_jsframe_index);
+
+ void UpdateValues();
+
+ Handle<Context> innermost_context() const { return innermost_context_; }
+ Handle<SharedFunctionInfo> outer_info() const { return outer_info_; }
+
+ private:
+ struct ContextChainElement {
+ Handle<Context> original_context;
+ Handle<Context> cloned_context;
+ Handle<JSObject> materialized_object;
+ Handle<ScopeInfo> scope_info;
+ };
+
+ void RecordContextsInChain(Handle<Context>* inner_context,
+ Handle<Context> first, Handle<Context> last);
+
+ Handle<JSObject> NewJSObjectWithNullProto();
+
+ // Helper function to find or create the arguments object for
+ // Runtime_DebugEvaluate.
+ void MaterializeArgumentsObject(Handle<JSObject> target,
+ Handle<JSFunction> function);
+
+ Handle<Context> MaterializeReceiver(Handle<Context> target,
+ Handle<JSFunction> function);
+
+ Handle<SharedFunctionInfo> outer_info_;
+ Handle<Context> innermost_context_;
+ List<ContextChainElement> context_chain_;
+ Isolate* isolate_;
+ JavaScriptFrame* frame_;
+ int inlined_jsframe_index_;
+ };
+
+ static MaybeHandle<Object> Evaluate(Isolate* isolate,
+ Handle<SharedFunctionInfo> outer_info,
+ Handle<Context> context,
+ Handle<Object> context_extension,
+ Handle<Object> receiver,
+ Handle<String> source);
+};
+
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_DEBUG_EVALUATE_H_
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
new file mode 100644
index 0000000000..c0970a359f
--- /dev/null
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -0,0 +1,219 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/debug-frames.h"
+
+#include "src/frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+FrameInspector::FrameInspector(JavaScriptFrame* frame,
+ int inlined_jsframe_index, Isolate* isolate)
+ : frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) {
+ has_adapted_arguments_ = frame_->has_adapted_arguments();
+ is_bottommost_ = inlined_jsframe_index == 0;
+ is_optimized_ = frame_->is_optimized();
+ // Calculate the deoptimized frame.
+ if (frame->is_optimized()) {
+ // TODO(turbofan): Revisit once we support deoptimization.
+ if (frame->LookupCode()->is_turbofanned() &&
+ frame->function()->shared()->asm_function() &&
+ !FLAG_turbo_asm_deoptimization) {
+ is_optimized_ = false;
+ return;
+ }
+
+ deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame(
+ frame, inlined_jsframe_index, isolate);
+ }
+}
+
+
+FrameInspector::~FrameInspector() {
+ // Get rid of the calculated deoptimized frame if any.
+ if (deoptimized_frame_ != NULL) {
+ Deoptimizer::DeleteDebuggerInspectableFrame(deoptimized_frame_, isolate_);
+ }
+}
+
+
+int FrameInspector::GetParametersCount() {
+ return is_optimized_ ? deoptimized_frame_->parameters_count()
+ : frame_->ComputeParametersCount();
+}
+
+
+int FrameInspector::expression_count() {
+ return deoptimized_frame_->expression_count();
+}
+
+
+Object* FrameInspector::GetFunction() {
+ return is_optimized_ ? deoptimized_frame_->GetFunction() : frame_->function();
+}
+
+
+Object* FrameInspector::GetParameter(int index) {
+ return is_optimized_ ? deoptimized_frame_->GetParameter(index)
+ : frame_->GetParameter(index);
+}
+
+
+Object* FrameInspector::GetExpression(int index) {
+ // TODO(turbofan): Revisit once we support deoptimization.
+ if (frame_->LookupCode()->is_turbofanned() &&
+ frame_->function()->shared()->asm_function() &&
+ !FLAG_turbo_asm_deoptimization) {
+ return isolate_->heap()->undefined_value();
+ }
+ return is_optimized_ ? deoptimized_frame_->GetExpression(index)
+ : frame_->GetExpression(index);
+}
+
+
+int FrameInspector::GetSourcePosition() {
+ return is_optimized_ ? deoptimized_frame_->GetSourcePosition()
+ : frame_->LookupCode()->SourcePosition(frame_->pc());
+}
+
+
+bool FrameInspector::IsConstructor() {
+ return is_optimized_ && !is_bottommost_
+ ? deoptimized_frame_->HasConstructStub()
+ : frame_->IsConstructor();
+}
+
+
+Object* FrameInspector::GetContext() {
+ return is_optimized_ ? deoptimized_frame_->GetContext() : frame_->context();
+}
+
+
+// To inspect all the provided arguments the frame might need to be
+// replaced with the arguments frame.
+void FrameInspector::SetArgumentsFrame(JavaScriptFrame* frame) {
+ DCHECK(has_adapted_arguments_);
+ frame_ = frame;
+ is_optimized_ = frame_->is_optimized();
+ DCHECK(!is_optimized_);
+}
+
+
+// Create a plain JSObject which materializes the local scope for the specified
+// frame.
+void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
+ Handle<ScopeInfo> scope_info) {
+ HandleScope scope(isolate_);
+ // First fill all parameters.
+ for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ // Do not materialize the parameter if it is shadowed by a context local.
+ Handle<String> name(scope_info->ParameterName(i));
+ if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
+
+ Handle<Object> value(i < GetParametersCount()
+ ? GetParameter(i)
+ : isolate_->heap()->undefined_value(),
+ isolate_);
+ DCHECK(!value->IsTheHole());
+
+ JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
+ }
+
+ // Second fill all stack locals.
+ for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ if (scope_info->LocalIsSynthetic(i)) continue;
+ Handle<String> name(scope_info->StackLocalName(i));
+ Handle<Object> value(GetExpression(scope_info->StackLocalIndex(i)),
+ isolate_);
+ if (value->IsTheHole()) value = isolate_->factory()->undefined_value();
+
+ JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
+ }
+}
+
+
+void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
+ Handle<JSFunction> function) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+ MaterializeStackLocals(target, scope_info);
+}
+
+
+void FrameInspector::UpdateStackLocalsFromMaterializedObject(
+ Handle<JSObject> target, Handle<ScopeInfo> scope_info) {
+ if (is_optimized_) {
+ // Optimized frames are not supported. Simply give up.
+ return;
+ }
+
+ HandleScope scope(isolate_);
+
+ // Parameters.
+ for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ // Shadowed parameters were not materialized.
+ Handle<String> name(scope_info->ParameterName(i));
+ if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
+
+ DCHECK(!frame_->GetParameter(i)->IsTheHole());
+ Handle<Object> value =
+ Object::GetPropertyOrElement(target, name).ToHandleChecked();
+ frame_->SetParameterValue(i, *value);
+ }
+
+ // Stack locals.
+ for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ if (scope_info->LocalIsSynthetic(i)) continue;
+ int index = scope_info->StackLocalIndex(i);
+ if (frame_->GetExpression(index)->IsTheHole()) continue;
+ Handle<Object> value =
+ Object::GetPropertyOrElement(
+ target, handle(scope_info->StackLocalName(i), isolate_))
+ .ToHandleChecked();
+ frame_->SetExpression(index, *value);
+ }
+}
+
+
+bool FrameInspector::ParameterIsShadowedByContextLocal(
+ Handle<ScopeInfo> info, Handle<String> parameter_name) {
+ VariableMode mode;
+ VariableLocation location;
+ InitializationFlag init_flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+ return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &location,
+ &init_flag, &maybe_assigned_flag) != -1;
+}
+
+
+SaveContext* DebugFrameHelper::FindSavedContextForFrame(
+ Isolate* isolate, JavaScriptFrame* frame) {
+ SaveContext* save = isolate->save_context();
+ while (save != NULL && !save->IsBelowFrame(frame)) {
+ save = save->prev();
+ }
+ DCHECK(save != NULL);
+ return save;
+}
+
+
+int DebugFrameHelper::FindIndexedNonNativeFrame(JavaScriptFrameIterator* it,
+ int index) {
+ int count = -1;
+ for (; !it->done(); it->Advance()) {
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ it->frame()->Summarize(&frames);
+ for (int i = frames.length() - 1; i >= 0; i--) {
+ // Omit functions from native and extension scripts.
+ if (!frames[i].function()->IsSubjectToDebugging()) continue;
+ if (++count == index) return i;
+ }
+ }
+ return -1;
+}
+
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
new file mode 100644
index 0000000000..86e817d47f
--- /dev/null
+++ b/deps/v8/src/debug/debug-frames.h
@@ -0,0 +1,81 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_DEBUG_FRAMES_H_
+#define V8_DEBUG_DEBUG_FRAMES_H_
+
+#include "src/deoptimizer.h"
+#include "src/frames.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class FrameInspector {
+ public:
+ FrameInspector(JavaScriptFrame* frame, int inlined_jsframe_index,
+ Isolate* isolate);
+
+ ~FrameInspector();
+
+ int GetParametersCount();
+ int expression_count();
+ Object* GetFunction();
+ Object* GetParameter(int index);
+ Object* GetExpression(int index);
+ int GetSourcePosition();
+ bool IsConstructor();
+ Object* GetContext();
+
+ JavaScriptFrame* GetArgumentsFrame() { return frame_; }
+ void SetArgumentsFrame(JavaScriptFrame* frame);
+
+ void MaterializeStackLocals(Handle<JSObject> target,
+ Handle<ScopeInfo> scope_info);
+
+ void MaterializeStackLocals(Handle<JSObject> target,
+ Handle<JSFunction> function);
+
+ void UpdateStackLocalsFromMaterializedObject(Handle<JSObject> object,
+ Handle<ScopeInfo> scope_info);
+
+ private:
+ bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info,
+ Handle<String> parameter_name);
+
+ JavaScriptFrame* frame_;
+ DeoptimizedFrameInfo* deoptimized_frame_;
+ Isolate* isolate_;
+ bool is_optimized_;
+ bool is_bottommost_;
+ bool has_adapted_arguments_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameInspector);
+};
+
+
+class DebugFrameHelper : public AllStatic {
+ public:
+ static SaveContext* FindSavedContextForFrame(Isolate* isolate,
+ JavaScriptFrame* frame);
+ // Advances the iterator to the frame that matches the index and returns the
+ // inlined frame index, or -1 if not found. Skips native JS functions.
+ static int FindIndexedNonNativeFrame(JavaScriptFrameIterator* it, int index);
+
+ // Helper functions for wrapping and unwrapping stack frame ids.
+ static Smi* WrapFrameId(StackFrame::Id id) {
+ DCHECK(IsAligned(OffsetFrom(id), static_cast<intptr_t>(4)));
+ return Smi::FromInt(id >> 2);
+ }
+
+ static StackFrame::Id UnwrapFrameId(int wrapped) {
+ return static_cast<StackFrame::Id>(wrapped << 2);
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_DEBUG_FRAMES_H_
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
new file mode 100644
index 0000000000..b9204f6050
--- /dev/null
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -0,0 +1,769 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/debug-scopes.h"
+
+#include "src/debug/debug.h"
+#include "src/frames-inl.h"
+#include "src/globals.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+
+ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
+ bool ignore_nested_scopes)
+ : isolate_(isolate),
+ frame_inspector_(frame_inspector),
+ nested_scope_chain_(4),
+ seen_script_scope_(false),
+ failed_(false) {
+ if (!frame_inspector->GetContext()->IsContext() ||
+ !frame_inspector->GetFunction()->IsJSFunction()) {
+ // Optimized frame, context or function cannot be materialized. Give up.
+ return;
+ }
+
+ context_ = Handle<Context>(Context::cast(frame_inspector->GetContext()));
+
+ // Catch the case when the debugger stops in an internal function.
+ Handle<JSFunction> function = GetFunction();
+ Handle<SharedFunctionInfo> shared_info(function->shared());
+ Handle<ScopeInfo> scope_info(shared_info->scope_info());
+ if (shared_info->script() == isolate->heap()->undefined_value()) {
+ while (context_->closure() == *function) {
+ context_ = Handle<Context>(context_->previous(), isolate_);
+ }
+ return;
+ }
+
+ // Currently it takes too much time to find nested scopes due to script
+ // parsing. Sometimes we want to run the ScopeIterator as fast as possible
+ // (for example, while collecting async call stacks on every
+ // addEventListener call), even if we drop some nested scopes.
+ // Later we may optimize getting the nested scopes (cache the result?)
+ // and include nested scopes into the "fast" iteration case as well.
+
+ if (!ignore_nested_scopes && shared_info->HasDebugInfo()) {
+ // The source position at return is always the end of the function,
+ // which is not consistent with the current scope chain. Therefore all
+ // nested with, catch and block contexts are skipped, and we can only
+ // inspect the function scope.
+ // This can only happen if we set a break point inside right before the
+ // return, which requires a debug info to be available.
+ Handle<DebugInfo> debug_info(shared_info->GetDebugInfo());
+
+ // PC points to the instruction after the current one, possibly a break
+ // location as well. So the "- 1" to exclude it from the search.
+ Address call_pc = GetFrame()->pc() - 1;
+
+ // Find the break point where execution has stopped.
+ BreakLocation location =
+ BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, call_pc);
+
+ ignore_nested_scopes = location.IsReturn();
+ }
+
+ if (ignore_nested_scopes) {
+ if (scope_info->HasContext()) {
+ context_ = Handle<Context>(context_->declaration_context(), isolate_);
+ } else {
+ while (context_->closure() == *function) {
+ context_ = Handle<Context>(context_->previous(), isolate_);
+ }
+ }
+ if (scope_info->scope_type() == FUNCTION_SCOPE ||
+ scope_info->scope_type() == ARROW_SCOPE) {
+ nested_scope_chain_.Add(scope_info);
+ }
+ } else {
+ // Reparse the code and analyze the scopes.
+ Handle<Script> script(Script::cast(shared_info->script()));
+ Scope* scope = NULL;
+
+ // Check whether we are in global, eval or function code.
+ Zone zone;
+ if (scope_info->scope_type() != FUNCTION_SCOPE &&
+ scope_info->scope_type() != ARROW_SCOPE) {
+ // Global or eval code.
+ ParseInfo info(&zone, script);
+ if (scope_info->scope_type() == SCRIPT_SCOPE) {
+ info.set_global();
+ } else {
+ DCHECK(scope_info->scope_type() == EVAL_SCOPE);
+ info.set_eval();
+ info.set_context(Handle<Context>(function->context()));
+ }
+ if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
+ scope = info.literal()->scope();
+ }
+ RetrieveScopeChain(scope, shared_info);
+ } else {
+ // Function code
+ ParseInfo info(&zone, function);
+ if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
+ scope = info.literal()->scope();
+ }
+ RetrieveScopeChain(scope, shared_info);
+ }
+ }
+}
+
+
+ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
+ : isolate_(isolate),
+ frame_inspector_(NULL),
+ context_(function->context()),
+ seen_script_scope_(false),
+ failed_(false) {
+ if (function->IsBuiltin()) context_ = Handle<Context>();
+}
+
+
+MUST_USE_RESULT MaybeHandle<JSObject> ScopeIterator::MaterializeScopeDetails() {
+ // Calculate the size of the result.
+ Handle<FixedArray> details =
+ isolate_->factory()->NewFixedArray(kScopeDetailsSize);
+ // Fill in scope details.
+ details->set(kScopeDetailsTypeIndex, Smi::FromInt(Type()));
+ Handle<JSObject> scope_object;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate_, scope_object, ScopeObject(), JSObject);
+ details->set(kScopeDetailsObjectIndex, *scope_object);
+ return isolate_->factory()->NewJSArrayWithElements(details);
+}
+
+
+void ScopeIterator::Next() {
+ DCHECK(!failed_);
+ ScopeType scope_type = Type();
+ if (scope_type == ScopeTypeGlobal) {
+ // The global scope is always the last in the chain.
+ DCHECK(context_->IsNativeContext());
+ context_ = Handle<Context>();
+ return;
+ }
+ if (scope_type == ScopeTypeScript) {
+ seen_script_scope_ = true;
+ if (context_->IsScriptContext()) {
+ context_ = Handle<Context>(context_->previous(), isolate_);
+ }
+ if (!nested_scope_chain_.is_empty()) {
+ DCHECK_EQ(nested_scope_chain_.last()->scope_type(), SCRIPT_SCOPE);
+ nested_scope_chain_.RemoveLast();
+ DCHECK(nested_scope_chain_.is_empty());
+ }
+ CHECK(context_->IsNativeContext());
+ return;
+ }
+ if (nested_scope_chain_.is_empty()) {
+ context_ = Handle<Context>(context_->previous(), isolate_);
+ } else {
+ if (nested_scope_chain_.last()->HasContext()) {
+ DCHECK(context_->previous() != NULL);
+ context_ = Handle<Context>(context_->previous(), isolate_);
+ }
+ nested_scope_chain_.RemoveLast();
+ }
+}
+
+
+// Return the type of the current scope.
+ScopeIterator::ScopeType ScopeIterator::Type() {
+ DCHECK(!failed_);
+ if (!nested_scope_chain_.is_empty()) {
+ Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
+ switch (scope_info->scope_type()) {
+ case FUNCTION_SCOPE:
+ case ARROW_SCOPE:
+ DCHECK(context_->IsFunctionContext() || !scope_info->HasContext());
+ return ScopeTypeLocal;
+ case MODULE_SCOPE:
+ DCHECK(context_->IsModuleContext());
+ return ScopeTypeModule;
+ case SCRIPT_SCOPE:
+ DCHECK(context_->IsScriptContext() || context_->IsNativeContext());
+ return ScopeTypeScript;
+ case WITH_SCOPE:
+ DCHECK(context_->IsWithContext());
+ return ScopeTypeWith;
+ case CATCH_SCOPE:
+ DCHECK(context_->IsCatchContext());
+ return ScopeTypeCatch;
+ case BLOCK_SCOPE:
+ DCHECK(!scope_info->HasContext() || context_->IsBlockContext());
+ return ScopeTypeBlock;
+ case EVAL_SCOPE:
+ UNREACHABLE();
+ }
+ }
+ if (context_->IsNativeContext()) {
+ DCHECK(context_->global_object()->IsGlobalObject());
+ // If we are at the native context and have not yet seen script scope,
+ // fake it.
+ return seen_script_scope_ ? ScopeTypeGlobal : ScopeTypeScript;
+ }
+ if (context_->IsFunctionContext()) {
+ return ScopeTypeClosure;
+ }
+ if (context_->IsCatchContext()) {
+ return ScopeTypeCatch;
+ }
+ if (context_->IsBlockContext()) {
+ return ScopeTypeBlock;
+ }
+ if (context_->IsModuleContext()) {
+ return ScopeTypeModule;
+ }
+ if (context_->IsScriptContext()) {
+ return ScopeTypeScript;
+ }
+ DCHECK(context_->IsWithContext());
+ return ScopeTypeWith;
+}
+
+
+MaybeHandle<JSObject> ScopeIterator::ScopeObject() {
+ DCHECK(!failed_);
+ switch (Type()) {
+ case ScopeIterator::ScopeTypeGlobal:
+ return Handle<JSObject>(CurrentContext()->global_proxy());
+ case ScopeIterator::ScopeTypeScript:
+ return MaterializeScriptScope();
+ case ScopeIterator::ScopeTypeLocal:
+ // Materialize the content of the local scope into a JSObject.
+ DCHECK(nested_scope_chain_.length() == 1);
+ return MaterializeLocalScope();
+ case ScopeIterator::ScopeTypeWith:
+ // Return the with object.
+ return Handle<JSObject>(JSObject::cast(CurrentContext()->extension()));
+ case ScopeIterator::ScopeTypeCatch:
+ return MaterializeCatchScope();
+ case ScopeIterator::ScopeTypeClosure:
+ // Materialize the content of the closure scope into a JSObject.
+ return MaterializeClosure();
+ case ScopeIterator::ScopeTypeBlock:
+ return MaterializeBlockScope();
+ case ScopeIterator::ScopeTypeModule:
+ return MaterializeModuleScope();
+ }
+ UNREACHABLE();
+ return Handle<JSObject>();
+}
+
+
+bool ScopeIterator::HasContext() {
+ ScopeType type = Type();
+ if (type == ScopeTypeBlock || type == ScopeTypeLocal) {
+ if (!nested_scope_chain_.is_empty()) {
+ return nested_scope_chain_.last()->HasContext();
+ }
+ }
+ return true;
+}
+
+
+bool ScopeIterator::SetVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ DCHECK(!failed_);
+ switch (Type()) {
+ case ScopeIterator::ScopeTypeGlobal:
+ break;
+ case ScopeIterator::ScopeTypeLocal:
+ return SetLocalVariableValue(variable_name, new_value);
+ case ScopeIterator::ScopeTypeWith:
+ break;
+ case ScopeIterator::ScopeTypeCatch:
+ return SetCatchVariableValue(variable_name, new_value);
+ case ScopeIterator::ScopeTypeClosure:
+ return SetClosureVariableValue(variable_name, new_value);
+ case ScopeIterator::ScopeTypeScript:
+ return SetScriptVariableValue(variable_name, new_value);
+ case ScopeIterator::ScopeTypeBlock:
+ return SetBlockVariableValue(variable_name, new_value);
+ case ScopeIterator::ScopeTypeModule:
+ // TODO(2399): should we implement it?
+ break;
+ }
+ return false;
+}
+
+
+Handle<ScopeInfo> ScopeIterator::CurrentScopeInfo() {
+ DCHECK(!failed_);
+ if (!nested_scope_chain_.is_empty()) {
+ return nested_scope_chain_.last();
+ } else if (context_->IsBlockContext()) {
+ return Handle<ScopeInfo>(ScopeInfo::cast(context_->extension()));
+ } else if (context_->IsFunctionContext()) {
+ return Handle<ScopeInfo>(context_->closure()->shared()->scope_info());
+ }
+ return Handle<ScopeInfo>::null();
+}
+
+
+Handle<Context> ScopeIterator::CurrentContext() {
+ DCHECK(!failed_);
+ if (Type() == ScopeTypeGlobal || Type() == ScopeTypeScript ||
+ nested_scope_chain_.is_empty()) {
+ return context_;
+ } else if (nested_scope_chain_.last()->HasContext()) {
+ return context_;
+ } else {
+ return Handle<Context>();
+ }
+}
+
+#ifdef DEBUG
+// Debug print of the content of the current scope.
+void ScopeIterator::DebugPrint() {
+ OFStream os(stdout);
+ DCHECK(!failed_);
+ switch (Type()) {
+ case ScopeIterator::ScopeTypeGlobal:
+ os << "Global:\n";
+ CurrentContext()->Print(os);
+ break;
+
+ case ScopeIterator::ScopeTypeLocal: {
+ os << "Local:\n";
+ GetFunction()->shared()->scope_info()->Print();
+ if (!CurrentContext().is_null()) {
+ CurrentContext()->Print(os);
+ if (CurrentContext()->has_extension()) {
+ Handle<Object> extension(CurrentContext()->extension(), isolate_);
+ if (extension->IsJSContextExtensionObject()) {
+ extension->Print(os);
+ }
+ }
+ }
+ break;
+ }
+
+ case ScopeIterator::ScopeTypeWith:
+ os << "With:\n";
+ CurrentContext()->extension()->Print(os);
+ break;
+
+ case ScopeIterator::ScopeTypeCatch:
+ os << "Catch:\n";
+ CurrentContext()->extension()->Print(os);
+ CurrentContext()->get(Context::THROWN_OBJECT_INDEX)->Print(os);
+ break;
+
+ case ScopeIterator::ScopeTypeClosure:
+ os << "Closure:\n";
+ CurrentContext()->Print(os);
+ if (CurrentContext()->has_extension()) {
+ Handle<Object> extension(CurrentContext()->extension(), isolate_);
+ if (extension->IsJSContextExtensionObject()) {
+ extension->Print(os);
+ }
+ }
+ break;
+
+ case ScopeIterator::ScopeTypeScript:
+ os << "Script:\n";
+ CurrentContext()
+ ->global_object()
+ ->native_context()
+ ->script_context_table()
+ ->Print(os);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ PrintF("\n");
+}
+#endif
+
+
+void ScopeIterator::RetrieveScopeChain(Scope* scope,
+ Handle<SharedFunctionInfo> shared_info) {
+ if (scope != NULL) {
+ int source_position = frame_inspector_->GetSourcePosition();
+ scope->GetNestedScopeChain(isolate_, &nested_scope_chain_, source_position);
+ } else {
+ // A failed reparse indicates that the preparser has diverged from the
+ // parser or that the preparse data given to the initial parse has been
+ // faulty. We fail in debug mode but in release mode we only provide the
+ // information we get from the context chain but nothing about
+ // completely stack allocated scopes or stack allocated locals.
+ // Or it could be due to stack overflow.
+ DCHECK(isolate_->has_pending_exception());
+ failed_ = true;
+ }
+}
+
+
+MaybeHandle<JSObject> ScopeIterator::MaterializeScriptScope() {
+ Handle<GlobalObject> global(CurrentContext()->global_object());
+ Handle<ScriptContextTable> script_contexts(
+ global->native_context()->script_context_table());
+
+ Handle<JSObject> script_scope =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
+
+ for (int context_index = 0; context_index < script_contexts->used();
+ context_index++) {
+ Handle<Context> context =
+ ScriptContextTable::GetContext(script_contexts, context_index);
+ Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
+ CopyContextLocalsToScopeObject(scope_info, context, script_scope);
+ }
+ return script_scope;
+}
+
+
+MaybeHandle<JSObject> ScopeIterator::MaterializeLocalScope() {
+ Handle<JSFunction> function = GetFunction();
+
+ Handle<JSObject> local_scope =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
+ frame_inspector_->MaterializeStackLocals(local_scope, function);
+
+ Handle<Context> frame_context(Context::cast(frame_inspector_->GetContext()));
+
+ HandleScope scope(isolate_);
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+
+ if (!scope_info->HasContext()) return local_scope;
+
+ // Third fill all context locals.
+ Handle<Context> function_context(frame_context->declaration_context());
+ CopyContextLocalsToScopeObject(scope_info, function_context, local_scope);
+
+ // Finally copy any properties from the function context extension.
+ // These will be variables introduced by eval.
+ if (function_context->closure() == *function) {
+ if (function_context->has_extension() &&
+ !function_context->IsNativeContext()) {
+ Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate_, keys, JSReceiver::GetKeys(ext, JSReceiver::INCLUDE_PROTOS),
+ JSObject);
+
+ for (int i = 0; i < keys->length(); i++) {
+ // Names of variables introduced by eval are strings.
+ DCHECK(keys->get(i)->IsString());
+ Handle<String> key(String::cast(keys->get(i)));
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate_, value, Object::GetPropertyOrElement(ext, key), JSObject);
+ RETURN_ON_EXCEPTION(isolate_,
+ Runtime::SetObjectProperty(isolate_, local_scope,
+ key, value, SLOPPY),
+ JSObject);
+ }
+ }
+ }
+
+ return local_scope;
+}
+
+
+// Create a plain JSObject which materializes the closure content for the
+// context.
+Handle<JSObject> ScopeIterator::MaterializeClosure() {
+ Handle<Context> context = CurrentContext();
+ DCHECK(context->IsFunctionContext());
+
+ Handle<SharedFunctionInfo> shared(context->closure()->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+
+ // Allocate and initialize a JSObject with all the content of this function
+ // closure.
+ Handle<JSObject> closure_scope =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
+
+ // Fill all context locals to the context extension.
+ CopyContextLocalsToScopeObject(scope_info, context, closure_scope);
+
+ // Finally copy any properties from the function context extension. This will
+ // be variables introduced by eval.
+ if (context->has_extension()) {
+ Handle<JSObject> ext(JSObject::cast(context->extension()));
+ DCHECK(ext->IsJSContextExtensionObject());
+ Handle<FixedArray> keys =
+ JSReceiver::GetKeys(ext, JSReceiver::OWN_ONLY).ToHandleChecked();
+
+ for (int i = 0; i < keys->length(); i++) {
+ HandleScope scope(isolate_);
+ // Names of variables introduced by eval are strings.
+ DCHECK(keys->get(i)->IsString());
+ Handle<String> key(String::cast(keys->get(i)));
+ Handle<Object> value = Object::GetProperty(ext, key).ToHandleChecked();
+ JSObject::SetOwnPropertyIgnoreAttributes(closure_scope, key, value, NONE)
+ .Check();
+ }
+ }
+
+ return closure_scope;
+}
+
+
+// Create a plain JSObject which materializes the scope for the specified
+// catch context.
+Handle<JSObject> ScopeIterator::MaterializeCatchScope() {
+ Handle<Context> context = CurrentContext();
+ DCHECK(context->IsCatchContext());
+ Handle<String> name(String::cast(context->extension()));
+ Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX),
+ isolate_);
+ Handle<JSObject> catch_scope =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
+ JSObject::SetOwnPropertyIgnoreAttributes(catch_scope, name, thrown_object,
+ NONE)
+ .Check();
+ return catch_scope;
+}
+
+
+// Create a plain JSObject which materializes the block scope for the specified
+// block context.
+Handle<JSObject> ScopeIterator::MaterializeBlockScope() {
+ Handle<JSObject> block_scope =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
+
+ Handle<Context> context = Handle<Context>::null();
+ if (!nested_scope_chain_.is_empty()) {
+ Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
+ frame_inspector_->MaterializeStackLocals(block_scope, scope_info);
+ if (scope_info->HasContext()) context = CurrentContext();
+ } else {
+ context = CurrentContext();
+ }
+
+ if (!context.is_null()) {
+ Handle<ScopeInfo> scope_info_from_context(
+ ScopeInfo::cast(context->extension()));
+ // Fill all context locals.
+ CopyContextLocalsToScopeObject(scope_info_from_context, context,
+ block_scope);
+ }
+ return block_scope;
+}
+
+
+// Create a plain JSObject which materializes the module scope for the specified
+// module context.
+MaybeHandle<JSObject> ScopeIterator::MaterializeModuleScope() {
+ Handle<Context> context = CurrentContext();
+ DCHECK(context->IsModuleContext());
+ Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
+
+ // Allocate and initialize a JSObject with all the members of the debugged
+ // module.
+ Handle<JSObject> module_scope =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
+
+ // Fill all context locals.
+ CopyContextLocalsToScopeObject(scope_info, context, module_scope);
+
+ return module_scope;
+}
+
+
+// Set the context local variable value.
+bool ScopeIterator::SetContextLocalValue(Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<String> variable_name,
+ Handle<Object> new_value) {
+ for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
+ Handle<String> next_name(scope_info->ContextLocalName(i));
+ if (String::Equals(variable_name, next_name)) {
+ VariableMode mode;
+ VariableLocation location;
+ InitializationFlag init_flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+ int context_index =
+ ScopeInfo::ContextSlotIndex(scope_info, next_name, &mode, &location,
+ &init_flag, &maybe_assigned_flag);
+ context->set(context_index, *new_value);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ JavaScriptFrame* frame = GetFrame();
+ // Optimized frames are not supported.
+ if (frame->is_optimized()) return false;
+
+ Handle<JSFunction> function(frame->function());
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+
+ bool default_result = false;
+
+ // Parameters.
+ for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ HandleScope scope(isolate_);
+ if (String::Equals(handle(scope_info->ParameterName(i)), variable_name)) {
+ frame->SetParameterValue(i, *new_value);
+ // Argument might be shadowed in heap context, don't stop here.
+ default_result = true;
+ }
+ }
+
+ // Stack locals.
+ for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ HandleScope scope(isolate_);
+ if (String::Equals(handle(scope_info->StackLocalName(i)), variable_name)) {
+ frame->SetExpression(scope_info->StackLocalIndex(i), *new_value);
+ return true;
+ }
+ }
+
+ if (scope_info->HasContext()) {
+ // Context locals.
+ Handle<Context> frame_context(Context::cast(frame->context()));
+ Handle<Context> function_context(frame_context->declaration_context());
+ if (SetContextLocalValue(scope_info, function_context, variable_name,
+ new_value)) {
+ return true;
+ }
+
+ // Function context extension. These are variables introduced by eval.
+ if (function_context->closure() == *function) {
+ if (function_context->has_extension() &&
+ !function_context->IsNativeContext()) {
+ Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+
+ Maybe<bool> maybe = JSReceiver::HasProperty(ext, variable_name);
+ DCHECK(maybe.IsJust());
+ if (maybe.FromJust()) {
+ // We don't expect this to do anything except replacing
+ // property value.
+ Runtime::SetObjectProperty(isolate_, ext, variable_name, new_value,
+ SLOPPY)
+ .Assert();
+ return true;
+ }
+ }
+ }
+ }
+
+ return default_result;
+}
+
+
+bool ScopeIterator::SetBlockVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ Handle<ScopeInfo> scope_info = CurrentScopeInfo();
+ JavaScriptFrame* frame = GetFrame();
+
+ for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ HandleScope scope(isolate_);
+ if (String::Equals(handle(scope_info->StackLocalName(i)), variable_name)) {
+ frame->SetExpression(scope_info->StackLocalIndex(i), *new_value);
+ return true;
+ }
+ }
+
+ if (HasContext()) {
+ return SetContextLocalValue(scope_info, CurrentContext(), variable_name,
+ new_value);
+ }
+ return false;
+}
+
+
+// This method copies structure of MaterializeClosure method above.
+bool ScopeIterator::SetClosureVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ Handle<Context> context = CurrentContext();
+ DCHECK(context->IsFunctionContext());
+
+ // Context locals to the context extension.
+ Handle<SharedFunctionInfo> shared(context->closure()->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+ if (SetContextLocalValue(scope_info, context, variable_name, new_value)) {
+ return true;
+ }
+
+ // Properties from the function context extension. This will
+ // be variables introduced by eval.
+ if (context->has_extension()) {
+ Handle<JSObject> ext(JSObject::cast(context->extension()));
+ DCHECK(ext->IsJSContextExtensionObject());
+ Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
+ DCHECK(maybe.IsJust());
+ if (maybe.FromJust()) {
+ // We don't expect this to do anything except replacing property value.
+ JSObject::SetOwnPropertyIgnoreAttributes(ext, variable_name, new_value,
+ NONE)
+ .Check();
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+bool ScopeIterator::SetScriptVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ Handle<Context> context = CurrentContext();
+ Handle<ScriptContextTable> script_contexts(
+ context->global_object()->native_context()->script_context_table());
+ ScriptContextTable::LookupResult lookup_result;
+ if (ScriptContextTable::Lookup(script_contexts, variable_name,
+ &lookup_result)) {
+ Handle<Context> script_context = ScriptContextTable::GetContext(
+ script_contexts, lookup_result.context_index);
+ script_context->set(lookup_result.slot_index, *new_value);
+ return true;
+ }
+
+ return false;
+}
+
+
+bool ScopeIterator::SetCatchVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ Handle<Context> context = CurrentContext();
+ DCHECK(context->IsCatchContext());
+ Handle<String> name(String::cast(context->extension()));
+ if (!String::Equals(name, variable_name)) {
+ return false;
+ }
+ context->set(Context::THROWN_OBJECT_INDEX, *new_value);
+ return true;
+}
+
+
+void ScopeIterator::CopyContextLocalsToScopeObject(
+ Handle<ScopeInfo> scope_info, Handle<Context> context,
+ Handle<JSObject> scope_object) {
+ Isolate* isolate = scope_info->GetIsolate();
+ int local_count = scope_info->ContextLocalCount();
+ if (local_count == 0) return;
+ // Fill all context locals to the context extension.
+ int first_context_var = scope_info->StackLocalCount();
+ int start = scope_info->ContextLocalNameEntriesIndex();
+ for (int i = 0; i < local_count; ++i) {
+ if (scope_info->LocalIsSynthetic(first_context_var + i)) continue;
+ int context_index = Context::MIN_CONTEXT_SLOTS + i;
+ Handle<Object> value = Handle<Object>(context->get(context_index), isolate);
+ // Reflect variables under TDZ as undefined in scope object.
+ if (value->IsTheHole()) continue;
+ // This should always succeed.
+ // TODO(verwaest): Use AddDataProperty instead.
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ scope_object, handle(String::cast(scope_info->get(i + start))), value,
+ ::NONE)
+ .Check();
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
new file mode 100644
index 0000000000..0247cc4bce
--- /dev/null
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -0,0 +1,126 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_DEBUG_SCOPES_H_
+#define V8_DEBUG_DEBUG_SCOPES_H_
+
+#include "src/debug/debug-frames.h"
+#include "src/frames.h"
+
+namespace v8 {
+namespace internal {
+
+// Iterate over the actual scopes visible from a stack frame or from a closure.
+// The iteration proceeds from the innermost visible nested scope outwards.
+// All scopes are backed by an actual context except the local scope,
+// which is inserted "artificially" in the context chain.
+class ScopeIterator {
+ public:
+ enum ScopeType {
+ ScopeTypeGlobal = 0,
+ ScopeTypeLocal,
+ ScopeTypeWith,
+ ScopeTypeClosure,
+ ScopeTypeCatch,
+ ScopeTypeBlock,
+ ScopeTypeScript,
+ ScopeTypeModule
+ };
+
+ static const int kScopeDetailsTypeIndex = 0;
+ static const int kScopeDetailsObjectIndex = 1;
+ static const int kScopeDetailsSize = 2;
+
+ ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
+ bool ignore_nested_scopes = false);
+
+ ScopeIterator(Isolate* isolate, Handle<JSFunction> function);
+
+ MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScopeDetails();
+
+ // More scopes?
+ bool Done() {
+ DCHECK(!failed_);
+ return context_.is_null();
+ }
+
+ bool Failed() { return failed_; }
+
+ // Move to the next scope.
+ void Next();
+
+ // Return the type of the current scope.
+ ScopeType Type();
+
+ // Return the JavaScript object with the content of the current scope.
+ MaybeHandle<JSObject> ScopeObject();
+
+ bool HasContext();
+
+ // Set variable value and return true on success.
+ bool SetVariableValue(Handle<String> variable_name, Handle<Object> new_value);
+
+ Handle<ScopeInfo> CurrentScopeInfo();
+
+ // Return the context for this scope. For the local context there might not
+ // be an actual context.
+ Handle<Context> CurrentContext();
+
+#ifdef DEBUG
+ // Debug print of the content of the current scope.
+ void DebugPrint();
+#endif
+
+ private:
+ Isolate* isolate_;
+ FrameInspector* const frame_inspector_;
+ Handle<Context> context_;
+ List<Handle<ScopeInfo> > nested_scope_chain_;
+ bool seen_script_scope_;
+ bool failed_;
+
+ inline JavaScriptFrame* GetFrame() {
+ return frame_inspector_->GetArgumentsFrame();
+ }
+
+ inline Handle<JSFunction> GetFunction() {
+ return Handle<JSFunction>(
+ JSFunction::cast(frame_inspector_->GetFunction()));
+ }
+
+ void RetrieveScopeChain(Scope* scope, Handle<SharedFunctionInfo> shared_info);
+
+ MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScriptScope();
+ MUST_USE_RESULT MaybeHandle<JSObject> MaterializeLocalScope();
+ MUST_USE_RESULT MaybeHandle<JSObject> MaterializeModuleScope();
+ Handle<JSObject> MaterializeClosure();
+ Handle<JSObject> MaterializeCatchScope();
+ Handle<JSObject> MaterializeBlockScope();
+
+ bool SetLocalVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value);
+ bool SetBlockVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value);
+ bool SetClosureVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value);
+ bool SetScriptVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value);
+ bool SetCatchVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value);
+ bool SetContextLocalValue(Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<String> variable_name,
+ Handle<Object> new_value);
+
+ void CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<JSObject> scope_object);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEBUG_DEBUG_SCOPES_H_
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug/debug.cc
index e952fe7ebb..3ab10132a8 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/debug/debug.h"
#include "src/api.h"
#include "src/arguments.h"
@@ -11,10 +11,10 @@
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
-#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
-#include "src/full-codegen.h"
+#include "src/frames-inl.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
#include "src/list.h"
#include "src/log.h"
@@ -36,7 +36,6 @@ Debug::Debug(Isolate* isolate)
is_active_(false),
is_suppressed_(false),
live_edit_enabled_(true), // TODO(yangguo): set to false by default.
- has_break_points_(false),
break_disabled_(false),
in_debug_event_listener_(false),
break_on_exception_(false),
@@ -59,16 +58,11 @@ static v8::Local<v8::Context> GetDebugEventContext(Isolate* isolate) {
BreakLocation::BreakLocation(Handle<DebugInfo> debug_info, RelocInfo* rinfo,
- RelocInfo* original_rinfo, int position,
- int statement_position)
+ int position, int statement_position)
: debug_info_(debug_info),
pc_offset_(static_cast<int>(rinfo->pc() - debug_info->code()->entry())),
- original_pc_offset_(static_cast<int>(
- original_rinfo->pc() - debug_info->original_code()->entry())),
rmode_(rinfo->rmode()),
- original_rmode_(original_rinfo->rmode()),
data_(rinfo->data()),
- original_data_(original_rinfo->data()),
position_(position),
statement_position_(statement_position) {}
@@ -76,30 +70,40 @@ BreakLocation::BreakLocation(Handle<DebugInfo> debug_info, RelocInfo* rinfo,
BreakLocation::Iterator::Iterator(Handle<DebugInfo> debug_info,
BreakLocatorType type)
: debug_info_(debug_info),
- type_(type),
- reloc_iterator_(debug_info->code(),
- ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE)),
- reloc_iterator_original_(
- debug_info->original_code(),
- ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE)),
+ reloc_iterator_(debug_info->code(), GetModeMask(type)),
break_index_(-1),
position_(1),
statement_position_(1) {
- Next();
+ if (!Done()) Next();
+}
+
+
+int BreakLocation::Iterator::GetModeMask(BreakLocatorType type) {
+ int mask = 0;
+ mask |= RelocInfo::ModeMask(RelocInfo::POSITION);
+ mask |= RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION);
+ mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
+ mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
+ mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL);
+ if (type == ALL_BREAK_LOCATIONS) {
+ mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
+ mask |= RelocInfo::ModeMask(RelocInfo::DEBUGGER_STATEMENT);
+ }
+ return mask;
}
void BreakLocation::Iterator::Next() {
DisallowHeapAllocation no_gc;
- DCHECK(!RinfoDone());
+ DCHECK(!Done());
// Iterate through reloc info for code and original code stopping at each
// breakable code target.
bool first = break_index_ == -1;
- while (!RinfoDone()) {
- if (!first) RinfoNext();
+ while (!Done()) {
+ if (!first) reloc_iterator_.next();
first = false;
- if (RinfoDone()) return;
+ if (Done()) return;
// Whenever a statement position or (plain) position is passed update the
// current value of these.
@@ -117,8 +121,10 @@ void BreakLocation::Iterator::Next() {
continue;
}
- // Check for break at return.
- if (RelocInfo::IsJSReturn(rmode())) {
+ DCHECK(RelocInfo::IsDebugBreakSlot(rmode()) ||
+ RelocInfo::IsDebuggerStatement(rmode()));
+
+ if (RelocInfo::IsDebugBreakSlotAtReturn(rmode())) {
// Set the positions to the end of the function.
if (debug_info_->shared()->HasSourceCode()) {
position_ = debug_info_->shared()->end_position() -
@@ -127,43 +133,11 @@ void BreakLocation::Iterator::Next() {
position_ = 0;
}
statement_position_ = position_;
- break_index_++;
- break;
}
- if (RelocInfo::IsCodeTarget(rmode())) {
- // Check for breakable code target. Look in the original code as setting
- // break points can cause the code targets in the running (debugged) code
- // to be of a different kind than in the original code.
- Address target = original_rinfo()->target_address();
- Code* code = Code::GetCodeFromTargetAddress(target);
-
- if (RelocInfo::IsConstructCall(rmode()) || code->is_call_stub()) {
- break_index_++;
- break;
- }
-
- if (code->kind() == Code::STUB &&
- CodeStub::GetMajorKey(code) == CodeStub::CallFunction) {
- break_index_++;
- break;
- }
- }
-
- // Skip below if we only want locations for calls and returns.
- if (type_ == CALLS_AND_RETURNS) continue;
-
- if (RelocInfo::IsDebuggerStatement(rmode())) {
- break_index_++;
- break;
- }
-
- if (RelocInfo::IsDebugBreakSlot(rmode()) && type_ != CALLS_AND_RETURNS) {
- // There is always a possible break point at a debug break slot.
- break_index_++;
- break;
- }
+ break;
}
+ break_index_++;
}
@@ -244,7 +218,6 @@ BreakLocation BreakLocation::FromPosition(Handle<DebugInfo> debug_info,
void BreakLocation::SetBreakPoint(Handle<Object> break_point_object) {
// If there is not already a real break point here patch code with debug
// break.
- DCHECK(code()->has_debug_break_slots());
if (!HasBreakPoint()) SetDebugBreak();
DCHECK(IsDebugBreak() || IsDebuggerStatement());
// Set the break point information.
@@ -305,16 +278,11 @@ void BreakLocation::SetDebugBreak() {
// handler as the handler and the function is the same.
if (IsDebugBreak()) return;
- if (IsExit()) {
- // Patch the frame exit code with a break point.
- SetDebugBreakAtReturn();
- } else if (IsDebugBreakSlot()) {
- // Patch the code in the break slot.
- SetDebugBreakAtSlot();
- } else {
- // Patch the IC call.
- SetDebugBreakAtIC();
- }
+ DCHECK(IsDebugBreakSlot());
+ Builtins* builtins = debug_info_->GetIsolate()->builtins();
+ Handle<Code> target =
+ IsReturn() ? builtins->Return_DebugBreak() : builtins->Slot_DebugBreak();
+ DebugCodegen::PatchDebugBreakSlot(pc(), target);
DCHECK(IsDebugBreak());
}
@@ -323,92 +291,22 @@ void BreakLocation::ClearDebugBreak() {
// Debugger statement always calls debugger. No need to modify it.
if (IsDebuggerStatement()) return;
- if (IsExit()) {
- // Restore the frame exit code with a break point.
- RestoreFromOriginal(Assembler::kJSReturnSequenceLength);
- } else if (IsDebugBreakSlot()) {
- // Restore the code in the break slot.
- RestoreFromOriginal(Assembler::kDebugBreakSlotLength);
- } else {
- // Restore the IC call.
- rinfo().set_target_address(original_rinfo().target_address());
- // Some ICs store data in the feedback vector. Clear this to ensure we
- // won't miss future stepping requirements.
- SharedFunctionInfo* shared = debug_info_->shared();
- shared->feedback_vector()->ClearICSlots(shared);
- }
+ DCHECK(IsDebugBreakSlot());
+ DebugCodegen::ClearDebugBreakSlot(pc());
DCHECK(!IsDebugBreak());
}
-void BreakLocation::RestoreFromOriginal(int length_in_bytes) {
- memcpy(pc(), original_pc(), length_in_bytes);
- CpuFeatures::FlushICache(pc(), length_in_bytes);
-}
-
-
bool BreakLocation::IsStepInLocation() const {
- if (IsConstructCall()) return true;
- if (RelocInfo::IsCodeTarget(rmode())) {
- HandleScope scope(debug_info_->GetIsolate());
- Handle<Code> target_code = CodeTarget();
- return target_code->is_call_stub();
- }
- return false;
+ return IsConstructCall() || IsCall();
}
bool BreakLocation::IsDebugBreak() const {
- if (IsExit()) {
- return rinfo().IsPatchedReturnSequence();
- } else if (IsDebugBreakSlot()) {
+ if (IsDebugBreakSlot()) {
return rinfo().IsPatchedDebugBreakSlotSequence();
- } else {
- return Debug::IsDebugBreak(rinfo().target_address());
- }
-}
-
-
-// Find the builtin to use for invoking the debug break
-static Handle<Code> DebugBreakForIC(Handle<Code> code, RelocInfo::Mode mode) {
- Isolate* isolate = code->GetIsolate();
-
- // Find the builtin debug break function matching the calling convention
- // used by the call site.
- if (code->is_inline_cache_stub()) {
- DCHECK(code->kind() == Code::CALL_IC);
- return isolate->builtins()->CallICStub_DebugBreak();
- }
- if (RelocInfo::IsConstructCall(mode)) {
- if (code->has_function_cache()) {
- return isolate->builtins()->CallConstructStub_Recording_DebugBreak();
- } else {
- return isolate->builtins()->CallConstructStub_DebugBreak();
- }
- }
- if (code->kind() == Code::STUB) {
- DCHECK(CodeStub::GetMajorKey(*code) == CodeStub::CallFunction);
- return isolate->builtins()->CallFunctionStub_DebugBreak();
- }
-
- UNREACHABLE();
- return Handle<Code>::null();
-}
-
-
-void BreakLocation::SetDebugBreakAtIC() {
- // Patch the original code with the current address as the current address
- // might have changed by the inline caching since the code was copied.
- original_rinfo().set_target_address(rinfo().target_address());
-
- if (RelocInfo::IsCodeTarget(rmode_)) {
- Handle<Code> target_code = CodeTarget();
-
- // Patch the code to invoke the builtin debug break function matching the
- // calling convention used by the call site.
- Handle<Code> debug_break_code = DebugBreakForIC(target_code, rmode_);
- rinfo().set_target_address(debug_break_code->entry());
}
+ return false;
}
@@ -417,36 +315,6 @@ Handle<Object> BreakLocation::BreakPointObjects() const {
}
-Handle<Code> BreakLocation::CodeTarget() const {
- DCHECK(IsCodeTarget());
- Address target = rinfo().target_address();
- return Handle<Code>(Code::GetCodeFromTargetAddress(target));
-}
-
-
-Handle<Code> BreakLocation::OriginalCodeTarget() const {
- DCHECK(IsCodeTarget());
- Address target = original_rinfo().target_address();
- return Handle<Code>(Code::GetCodeFromTargetAddress(target));
-}
-
-
-bool BreakLocation::Iterator::RinfoDone() const {
- DCHECK(reloc_iterator_.done() == reloc_iterator_original_.done());
- return reloc_iterator_.done();
-}
-
-
-void BreakLocation::Iterator::RinfoNext() {
- reloc_iterator_.next();
- reloc_iterator_original_.next();
-#ifdef DEBUG
- DCHECK(reloc_iterator_.done() == reloc_iterator_original_.done());
- DCHECK(reloc_iterator_.done() || rmode() == original_rmode());
-#endif
-}
-
-
// Threading support.
void Debug::ThreadInit() {
thread_local_.break_count_ = 0;
@@ -546,98 +414,21 @@ ScriptCache::~ScriptCache() {
}
-void Debug::HandlePhantomDebugInfo(
- const v8::WeakCallbackInfo<DebugInfoListNode>& data) {
- DebugInfoListNode* node = data.GetParameter();
- node->ClearInfo();
- Debug* debug = reinterpret_cast<Isolate*>(data.GetIsolate())->debug();
- debug->RemoveDebugInfo(node);
-#ifdef DEBUG
- for (DebugInfoListNode* n = debug->debug_info_list_;
- n != NULL;
- n = n->next()) {
- DCHECK(n != node);
- }
-#endif
-}
-
-
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
// Globalize the request debug info object and make it weak.
GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
debug_info_ =
Handle<DebugInfo>::cast(global_handles->Create(debug_info)).location();
- typedef WeakCallbackInfo<void>::Callback Callback;
- GlobalHandles::MakeWeak(
- reinterpret_cast<Object**>(debug_info_), this,
- reinterpret_cast<Callback>(Debug::HandlePhantomDebugInfo),
- v8::WeakCallbackType::kParameter);
}
-void DebugInfoListNode::ClearInfo() {
+DebugInfoListNode::~DebugInfoListNode() {
if (debug_info_ == nullptr) return;
GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_info_));
debug_info_ = nullptr;
}
-bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
- Factory* factory = isolate->factory();
- HandleScope scope(isolate);
-
- // Bail out if the index is invalid.
- if (index == -1) return false;
-
- // Find source and name for the requested script.
- Handle<String> source_code =
- isolate->bootstrapper()->SourceLookup<Natives>(index);
- Vector<const char> name = Natives::GetScriptName(index);
- Handle<String> script_name =
- factory->NewStringFromAscii(name).ToHandleChecked();
- Handle<Context> context = isolate->native_context();
-
- // Compile the script.
- Handle<SharedFunctionInfo> function_info;
- function_info = Compiler::CompileScript(
- source_code, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
- context, NULL, NULL, ScriptCompiler::kNoCompileOptions, NATIVES_CODE,
- false);
- if (function_info.is_null()) return false;
-
- // Execute the shared function in the debugger context.
- Handle<JSFunction> function =
- factory->NewFunctionFromSharedFunctionInfo(function_info, context);
-
- MaybeHandle<Object> maybe_exception;
- MaybeHandle<Object> result = Execution::TryCall(
- function, handle(context->global_proxy()), 0, NULL, &maybe_exception);
-
- // Check for caught exceptions.
- if (result.is_null()) {
- DCHECK(!isolate->has_pending_exception());
- MessageLocation computed_location;
- isolate->ComputeLocation(&computed_location);
- Handle<JSMessageObject> message = MessageHandler::MakeMessageObject(
- isolate, MessageTemplate::kDebuggerLoading, &computed_location,
- isolate->factory()->undefined_value(), Handle<JSArray>());
- DCHECK(!isolate->has_pending_exception());
- Handle<Object> exception;
- if (maybe_exception.ToHandle(&exception)) {
- isolate->set_pending_exception(*exception);
- MessageHandler::ReportMessage(isolate, NULL, message);
- }
- DCHECK(!maybe_exception.is_null());
- return false;
- }
-
- // Mark this script as native and return successfully.
- Handle<Script> script(Script::cast(function->shared()->script()));
- script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- return true;
-}
-
-
bool Debug::Load() {
// Return if debugger is already loaded.
if (is_loaded()) return true;
@@ -656,38 +447,12 @@ bool Debug::Load() {
HandleScope scope(isolate_);
ExtensionConfiguration no_extensions;
Handle<Context> context = isolate_->bootstrapper()->CreateEnvironment(
- MaybeHandle<JSGlobalProxy>(), v8::Local<ObjectTemplate>(),
- &no_extensions);
+ MaybeHandle<JSGlobalProxy>(), v8::Local<ObjectTemplate>(), &no_extensions,
+ DEBUG_CONTEXT);
// Fail if no context could be created.
if (context.is_null()) return false;
- // Use the debugger context.
- SaveContext save(isolate_);
- isolate_->set_context(*context);
-
- // Expose the builtins object in the debugger context.
- Handle<String> key = isolate_->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("builtins"));
- Handle<GlobalObject> global =
- Handle<GlobalObject>(context->global_object(), isolate_);
- Handle<JSBuiltinsObject> builtin =
- Handle<JSBuiltinsObject>(global->builtins(), isolate_);
- RETURN_ON_EXCEPTION_VALUE(
- isolate_, Object::SetProperty(global, key, builtin, SLOPPY), false);
-
- // Compile the JavaScript for the debugger in the debugger context.
- bool caught_exception =
- !CompileDebuggerScript(isolate_, Natives::GetIndex("mirror")) ||
- !CompileDebuggerScript(isolate_, Natives::GetIndex("debug"));
-
- if (FLAG_enable_liveedit) {
- caught_exception = caught_exception ||
- !CompileDebuggerScript(isolate_, Natives::GetIndex("liveedit"));
- }
- // Check for caught exceptions.
- if (caught_exception) return false;
-
debug_context_ = Handle<Context>::cast(
isolate_->global_handles()->Create(*context));
return true;
@@ -732,9 +497,13 @@ void Debug::Break(Arguments args, JavaScriptFrame* frame) {
PostponeInterruptsScope postpone(isolate_);
// Get the debug info (create it if it does not exist).
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>(frame->function()->shared());
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+ Handle<JSFunction> function(frame->function());
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!EnsureDebugInfo(shared, function)) {
+ // Return if we failed to retrieve the debug info.
+ return;
+ }
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo());
// Find the break point where execution has stopped.
// PC points to the instruction after the current one, possibly a break
@@ -825,15 +594,6 @@ void Debug::Break(Arguments args, JavaScriptFrame* frame) {
}
-RUNTIME_FUNCTION(Debug_Break) {
- // Get the top-most JavaScript frame.
- JavaScriptFrameIterator it(isolate);
- isolate->debug()->Break(args, it.frame());
- isolate->debug()->SetAfterBreakTarget(it.frame());
- return isolate->heap()->undefined_value();
-}
-
-
// Check the break point objects for whether one or more are actually
// triggered. This function returns a JSArray with the break point objects
// which is triggered.
@@ -872,6 +632,18 @@ Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
}
+MaybeHandle<Object> Debug::CallFunction(const char* name, int argc,
+ Handle<Object> args[]) {
+ PostponeInterruptsScope no_interrupts(isolate_);
+ AssertDebugContext();
+ Handle<Object> holder = isolate_->natives_utils_object();
+ Handle<JSFunction> fun = Handle<JSFunction>::cast(
+ Object::GetProperty(isolate_, holder, name, STRICT).ToHandleChecked());
+ Handle<Object> undefined = isolate_->factory()->undefined_value();
+ return Execution::TryCall(fun, undefined, argc, args);
+}
+
+
// Check whether a single break point object is triggered.
bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
Factory* factory = isolate_->factory();
@@ -880,25 +652,14 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
// Ignore check if break point object is not a JSObject.
if (!break_point_object->IsJSObject()) return true;
- // Get the function IsBreakPointTriggered (defined in debug-debugger.js).
- Handle<String> is_break_point_triggered_string =
- factory->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("IsBreakPointTriggered"));
- Handle<GlobalObject> debug_global(debug_context()->global_object());
- Handle<JSFunction> check_break_point =
- Handle<JSFunction>::cast(Object::GetProperty(
- debug_global, is_break_point_triggered_string).ToHandleChecked());
-
// Get the break id as an object.
Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
- // Call HandleBreakPointx.
+ // Call IsBreakPointTriggered.
Handle<Object> argv[] = { break_id, break_point_object };
Handle<Object> result;
- if (!Execution::TryCall(check_break_point,
- isolate_->js_builtins_object(),
- arraysize(argv),
- argv).ToHandle(&result)) {
+ if (!CallFunction("IsBreakPointTriggered", arraysize(argv), argv)
+ .ToHandle(&result)) {
return false;
}
@@ -907,27 +668,11 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
}
-// Check whether the function has debug information.
-bool Debug::HasDebugInfo(Handle<SharedFunctionInfo> shared) {
- return !shared->debug_info()->IsUndefined();
-}
-
-
-// Return the debug info for this function. EnsureDebugInfo must be called
-// prior to ensure the debug info has been generated for shared.
-Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
- DCHECK(HasDebugInfo(shared));
- return Handle<DebugInfo>(DebugInfo::cast(shared->debug_info()));
-}
-
-
bool Debug::SetBreakPoint(Handle<JSFunction> function,
Handle<Object> break_point_object,
int* source_position) {
HandleScope scope(isolate_);
- PrepareForBreakPoints();
-
// Make sure the function is compiled and has set up the debug info.
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
@@ -935,13 +680,13 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
return true;
}
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo());
// Source positions starts with zero.
DCHECK(*source_position >= 0);
// Find the break point and change it.
BreakLocation location = BreakLocation::FromPosition(
- debug_info, SOURCE_BREAK_LOCATIONS, *source_position, STATEMENT_ALIGNED);
+ debug_info, ALL_BREAK_LOCATIONS, *source_position, STATEMENT_ALIGNED);
*source_position = location.statement_position();
location.SetBreakPoint(break_point_object);
@@ -956,8 +701,6 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
BreakPositionAlignment alignment) {
HandleScope scope(isolate_);
- PrepareForBreakPoints();
-
// Obtain shared function info for the function.
Handle<Object> result =
FindSharedFunctionInfoInScript(script, *source_position);
@@ -979,13 +722,13 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
position = *source_position - shared->start_position();
}
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo());
// Source positions starts with zero.
DCHECK(position >= 0);
// Find the break point and change it.
BreakLocation location = BreakLocation::FromPosition(
- debug_info, SOURCE_BREAK_LOCATIONS, position, alignment);
+ debug_info, ALL_BREAK_LOCATIONS, position, alignment);
location.SetBreakPoint(break_point_object);
position = (alignment == STATEMENT_ALIGNED) ? location.statement_position()
@@ -1017,7 +760,7 @@ void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
break_point_info->code_position()->value();
BreakLocation location =
- BreakLocation::FromAddress(debug_info, SOURCE_BREAK_LOCATIONS, pc);
+ BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, pc);
location.ClearBreakPoint(break_point_object);
// If there are no more break points left remove the debug info for this
@@ -1053,11 +796,6 @@ void Debug::ClearAllBreakPoints() {
void Debug::FloodWithOneShot(Handle<JSFunction> function,
BreakLocatorType type) {
- // Do not ever break in native and extension functions.
- if (!function->IsSubjectToDebugging()) return;
-
- PrepareForBreakPoints();
-
// Make sure the function is compiled and has set up the debug info.
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
@@ -1066,8 +804,8 @@ void Debug::FloodWithOneShot(Handle<JSFunction> function,
}
// Flood the function with break points.
- for (BreakLocation::Iterator it(GetDebugInfo(shared), type); !it.Done();
- it.Next()) {
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+ for (BreakLocation::Iterator it(debug_info, type); !it.Done(); it.Next()) {
it.GetBreakLocation().SetOneShot();
}
}
@@ -1078,8 +816,7 @@ void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex),
isolate_);
- if (!bindee.is_null() && bindee->IsJSFunction() &&
- JSFunction::cast(*bindee)->IsSubjectToDebugging()) {
+ if (!bindee.is_null() && bindee->IsJSFunction()) {
Handle<JSFunction> bindee_function(JSFunction::cast(*bindee));
FloodWithOneShotGeneric(bindee_function);
}
@@ -1164,13 +901,18 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
}
+FrameSummary GetFirstFrameSummary(JavaScriptFrame* frame) {
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ frame->Summarize(&frames);
+ return frames.first();
+}
+
+
void Debug::PrepareStep(StepAction step_action,
int step_count,
StackFrame::Id frame_id) {
HandleScope scope(isolate_);
- PrepareForBreakPoints();
-
DCHECK(in_debug_scope());
// Remember this step action and count.
@@ -1215,22 +957,18 @@ void Debug::PrepareStep(StepAction step_action,
return;
}
- List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- frames_it.frame()->Summarize(&frames);
- FrameSummary summary = frames.first();
-
// Get the debug info (create it if it does not exist).
+ FrameSummary summary = GetFirstFrameSummary(frame);
Handle<JSFunction> function(summary.function());
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
// Return if ensuring debug info failed.
return;
}
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
- // Compute whether or not the target is a call target.
- bool is_at_restarted_function = false;
- Handle<Code> call_function_stub;
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+ // Refresh frame summary if the code has been recompiled for debugging.
+ if (shared->code() != *summary.code()) summary = GetFirstFrameSummary(frame);
// PC points to the instruction after the current one, possibly a break
// location as well. So the "- 1" to exclude it from the search.
@@ -1238,39 +976,15 @@ void Debug::PrepareStep(StepAction step_action,
BreakLocation location =
BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, call_pc);
- if (thread_local_.restarter_frame_function_pointer_ == NULL) {
- if (location.IsCodeTarget()) {
- Handle<Code> target_code = location.CodeTarget();
-
- // Check if target code is CallFunction stub.
- Handle<Code> maybe_call_function_stub = target_code;
- // If there is a breakpoint at this line look at the original code to
- // check if it is a CallFunction stub.
- if (location.IsDebugBreak()) {
- maybe_call_function_stub = location.OriginalCodeTarget();
- }
- if ((maybe_call_function_stub->kind() == Code::STUB &&
- CodeStub::GetMajorKey(*maybe_call_function_stub) ==
- CodeStub::CallFunction) ||
- maybe_call_function_stub->is_call_stub()) {
- // Save reference to the code as we may need it to find out arguments
- // count for 'step in' later.
- call_function_stub = maybe_call_function_stub;
- }
- }
- } else {
- is_at_restarted_function = true;
- }
-
// If this is the last break code target step out is the only possibility.
- if (location.IsExit() || step_action == StepOut) {
+ if (location.IsReturn() || step_action == StepOut) {
if (step_action == StepOut) {
// Skip step_count frames starting with the current one.
while (step_count-- > 0 && !frames_it.done()) {
frames_it.Advance();
}
} else {
- DCHECK(location.IsExit());
+ DCHECK(location.IsReturn());
frames_it.Advance();
}
// Skip native and extension functions on the stack.
@@ -1293,28 +1007,11 @@ void Debug::PrepareStep(StepAction step_action,
if (step_action != StepNext && step_action != StepMin) {
// If there's restarter frame on top of the stack, just get the pointer
// to function which is going to be restarted.
- if (is_at_restarted_function) {
+ if (thread_local_.restarter_frame_function_pointer_ != NULL) {
Handle<JSFunction> restarted_function(
JSFunction::cast(*thread_local_.restarter_frame_function_pointer_));
FloodWithOneShot(restarted_function);
- } else if (!call_function_stub.is_null()) {
- // If it's CallFunction stub ensure target function is compiled and flood
- // it with one shot breakpoints.
- bool is_call_ic = call_function_stub->kind() == Code::CALL_IC;
-
- // Find out number of arguments from the stub minor key.
- uint32_t key = call_function_stub->stub_key();
- // Argc in the stub is the number of arguments passed - not the
- // expected arguments of the called function.
- int call_function_arg_count = is_call_ic
- ? CallICStub::ExtractArgcFromMinorKey(CodeStub::MinorKeyFromKey(key))
- : CallFunctionStub::ExtractArgcFromMinorKey(
- CodeStub::MinorKeyFromKey(key));
-
- DCHECK(is_call_ic ||
- CodeStub::GetMajorKey(*call_function_stub) ==
- CodeStub::MajorKeyFromKey(key));
-
+ } else if (location.IsCall()) {
// Find target function on the expression stack.
// Expression stack looks like this (top to bottom):
// argN
@@ -1322,10 +1019,10 @@ void Debug::PrepareStep(StepAction step_action,
// arg0
// Receiver
// Function to call
- int expressions_count = frame->ComputeExpressionsCount();
- DCHECK(expressions_count - 2 - call_function_arg_count >= 0);
- Object* fun = frame->GetExpression(
- expressions_count - 2 - call_function_arg_count);
+ int num_expressions_without_args =
+ frame->ComputeExpressionsCount() - location.CallArgumentsCount();
+ DCHECK(num_expressions_without_args >= 2);
+ Object* fun = frame->GetExpression(num_expressions_without_args - 2);
// Flood the actual target of call/apply.
if (fun->IsJSFunction()) {
@@ -1338,10 +1035,9 @@ void Debug::PrepareStep(StepAction step_action,
while (fun->IsJSFunction()) {
Code* code = JSFunction::cast(fun)->shared()->code();
if (code != apply && code != call) break;
- DCHECK(expressions_count - i - call_function_arg_count >= 0);
- fun = frame->GetExpression(expressions_count - i -
- call_function_arg_count);
- i -= 1;
+ DCHECK(num_expressions_without_args >= i);
+ fun = frame->GetExpression(num_expressions_without_args - i);
+ i--;
}
}
@@ -1351,7 +1047,7 @@ void Debug::PrepareStep(StepAction step_action,
}
}
- ActivateStepIn(function, frame);
+ ActivateStepIn(frame);
}
// Fill the current function with one-shot break points even for step in on
@@ -1394,7 +1090,7 @@ bool Debug::StepNextContinue(BreakLocation* break_location,
// statement is hit.
if (step_action == StepNext || step_action == StepIn) {
// Never continue if returning from function.
- if (break_location->IsExit()) return false;
+ if (break_location->IsReturn()) return false;
// Continue if we are still on the same frame and in the same statement.
int current_statement_position =
@@ -1412,7 +1108,7 @@ bool Debug::StepNextContinue(BreakLocation* break_location,
// object.
bool Debug::IsDebugBreak(Address addr) {
Code* code = Code::GetCodeFromTargetAddress(addr);
- return code->is_debug_stub() && code->extra_ic_state() == DEBUG_BREAK;
+ return code->is_debug_stub();
}
@@ -1422,10 +1118,10 @@ Handle<Object> Debug::GetSourceBreakLocations(
BreakPositionAlignment position_alignment) {
Isolate* isolate = shared->GetIsolate();
Heap* heap = isolate->heap();
- if (!HasDebugInfo(shared)) {
+ if (!shared->HasDebugInfo()) {
return Handle<Object>(heap->undefined_value(), isolate);
}
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo());
if (debug_info->GetBreakPointCount() == 0) {
return Handle<Object>(heap->undefined_value(), isolate);
}
@@ -1509,12 +1205,8 @@ void Debug::ClearOneShot() {
}
-void Debug::ActivateStepIn(Handle<JSFunction> function, StackFrame* frame) {
+void Debug::ActivateStepIn(StackFrame* frame) {
DCHECK(!StepOutActive());
- // Make sure IC state is clean. This is so that we correct flood
- // accessor pairs when stepping in.
- function->code()->ClearInlineCaches();
- function->shared()->feedback_vector()->ClearICSlots(function->shared());
thread_local_.step_into_fp_ = frame->UnpaddedFP();
}
@@ -1542,388 +1234,199 @@ void Debug::ClearStepNext() {
}
-static void CollectActiveFunctionsFromThread(
- Isolate* isolate,
- ThreadLocalTop* top,
- List<Handle<JSFunction> >* active_functions,
- Object* active_code_marker) {
- // Find all non-optimized code functions with activation frames
- // on the stack. This includes functions which have optimized
- // activations (including inlined functions) on the stack as the
- // non-optimized code is needed for the lazy deoptimization.
- for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized()) {
- List<JSFunction*> functions(FLAG_max_inlining_levels + 1);
- frame->GetFunctions(&functions);
- for (int i = 0; i < functions.length(); i++) {
- JSFunction* function = functions[i];
- active_functions->Add(Handle<JSFunction>(function));
- function->shared()->code()->set_gc_metadata(active_code_marker);
- }
- } else if (frame->function()->IsJSFunction()) {
- JSFunction* function = frame->function();
- DCHECK(frame->LookupCode()->kind() == Code::FUNCTION);
- active_functions->Add(Handle<JSFunction>(function));
- function->shared()->code()->set_gc_metadata(active_code_marker);
- }
- }
+bool MatchingCodeTargets(Code* target1, Code* target2) {
+ if (target1 == target2) return true;
+ if (target1->kind() != target2->kind()) return false;
+ return target1->is_handler() || target1->is_inline_cache_stub();
}
-// Figure out how many bytes of "pc_offset" correspond to actual code by
-// subtracting off the bytes that correspond to constant/veneer pools. See
-// Assembler::CheckConstPool() and Assembler::CheckVeneerPool(). Note that this
-// is only useful for architectures using constant pools or veneer pools.
-static int ComputeCodeOffsetFromPcOffset(Code *code, int pc_offset) {
- DCHECK_EQ(code->kind(), Code::FUNCTION);
- DCHECK(!code->has_debug_break_slots());
- DCHECK_LE(0, pc_offset);
- DCHECK_LT(pc_offset, code->instruction_end() - code->instruction_start());
-
- int mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
- RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
- byte *pc = code->instruction_start() + pc_offset;
- int code_offset = pc_offset;
- for (RelocIterator it(code, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->pc() >= pc) break;
- DCHECK(RelocInfo::IsConstPool(info->rmode()));
- code_offset -= static_cast<int>(info->data());
- DCHECK_LE(0, code_offset);
- }
-
- return code_offset;
-}
-
+// Count the number of calls before the current frame PC to find the
+// corresponding PC in the newly recompiled code.
+static Address ComputeNewPcForRedirect(Code* new_code, Code* old_code,
+ Address old_pc) {
+ DCHECK_EQ(old_code->kind(), Code::FUNCTION);
+ DCHECK_EQ(new_code->kind(), Code::FUNCTION);
+ DCHECK(new_code->has_debug_break_slots());
+ static const int mask = RelocInfo::kCodeTargetMask;
-// The inverse of ComputeCodeOffsetFromPcOffset.
-static int ComputePcOffsetFromCodeOffset(Code *code, int code_offset) {
- DCHECK_EQ(code->kind(), Code::FUNCTION);
+ // Find the target of the current call.
+ Code* target = NULL;
+ intptr_t delta = 0;
+ for (RelocIterator it(old_code, mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address current_pc = rinfo->pc();
+ // The frame PC is behind the call instruction by the call instruction size.
+ if (current_pc > old_pc) break;
+ delta = old_pc - current_pc;
+ target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ }
- int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
- RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
- int reloc = 0;
- for (RelocIterator it(code, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->pc() - code->instruction_start() - reloc >= code_offset) break;
- if (RelocInfo::IsDebugBreakSlot(info->rmode())) {
- reloc += Assembler::kDebugBreakSlotLength;
- } else {
- DCHECK(RelocInfo::IsConstPool(info->rmode()));
- reloc += static_cast<int>(info->data());
- }
+ // Count the number of calls to the same target before the current call.
+ int index = 0;
+ for (RelocIterator it(old_code, mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address current_pc = rinfo->pc();
+ if (current_pc > old_pc) break;
+ Code* current = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ if (MatchingCodeTargets(target, current)) index++;
}
- int pc_offset = code_offset + reloc;
+ DCHECK(index > 0);
- DCHECK_LT(code->instruction_start() + pc_offset, code->instruction_end());
+ // Repeat the count on the new code to find corresponding call.
+ for (RelocIterator it(new_code, mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Code* current = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ if (MatchingCodeTargets(target, current)) index--;
+ if (index == 0) return rinfo->pc() + delta;
+ }
- return pc_offset;
+ UNREACHABLE();
+ return NULL;
}
-static void RedirectActivationsToRecompiledCodeOnThread(
- Isolate* isolate,
- ThreadLocalTop* top) {
- for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
-
- if (frame->is_optimized() || !frame->function()->IsJSFunction()) continue;
-
- JSFunction* function = frame->function();
-
- DCHECK(frame->LookupCode()->kind() == Code::FUNCTION);
-
- Handle<Code> frame_code(frame->LookupCode());
- if (frame_code->has_debug_break_slots()) continue;
-
- Handle<Code> new_code(function->shared()->code());
- if (new_code->kind() != Code::FUNCTION ||
- !new_code->has_debug_break_slots()) {
- continue;
- }
-
- int old_pc_offset =
- static_cast<int>(frame->pc() - frame_code->instruction_start());
- int code_offset = ComputeCodeOffsetFromPcOffset(*frame_code, old_pc_offset);
- int new_pc_offset = ComputePcOffsetFromCodeOffset(*new_code, code_offset);
-
- // Compute the equivalent pc in the new code.
- byte* new_pc = new_code->instruction_start() + new_pc_offset;
-
- if (FLAG_trace_deopt) {
- PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
- "with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
- "for debugging, "
- "changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
- reinterpret_cast<intptr_t>(
- frame_code->instruction_start()),
- reinterpret_cast<intptr_t>(
- frame_code->instruction_start()) +
- frame_code->instruction_size(),
- frame_code->instruction_size(),
- reinterpret_cast<intptr_t>(new_code->instruction_start()),
- reinterpret_cast<intptr_t>(new_code->instruction_start()) +
- new_code->instruction_size(),
- new_code->instruction_size(),
- reinterpret_cast<intptr_t>(frame->pc()),
- reinterpret_cast<intptr_t>(new_pc));
- }
+// Count the number of continuations at which the current pc offset is at.
+static int ComputeContinuationIndexFromPcOffset(Code* code, int pc_offset) {
+ DCHECK_EQ(code->kind(), Code::FUNCTION);
+ Address pc = code->instruction_start() + pc_offset;
+ int mask = RelocInfo::ModeMask(RelocInfo::GENERATOR_CONTINUATION);
+ int index = 0;
+ for (RelocIterator it(code, mask); !it.done(); it.next()) {
+ index++;
+ RelocInfo* rinfo = it.rinfo();
+ Address current_pc = rinfo->pc();
+ if (current_pc == pc) break;
+ DCHECK(current_pc < pc);
+ }
+ return index;
+}
- if (FLAG_enable_embedded_constant_pool) {
- // Update constant pool pointer for new code.
- frame->set_constant_pool(new_code->constant_pool());
- }
- // Patch the return address to return into the code with
- // debug break slots.
- frame->set_pc(new_pc);
- }
+// Find the pc offset for the given continuation index.
+static int ComputePcOffsetFromContinuationIndex(Code* code, int index) {
+ DCHECK_EQ(code->kind(), Code::FUNCTION);
+ DCHECK(code->has_debug_break_slots());
+ int mask = RelocInfo::ModeMask(RelocInfo::GENERATOR_CONTINUATION);
+ RelocIterator it(code, mask);
+ for (int i = 1; i < index; i++) it.next();
+ return static_cast<int>(it.rinfo()->pc() - code->instruction_start());
}
-class ActiveFunctionsCollector : public ThreadVisitor {
+class RedirectActiveFunctions : public ThreadVisitor {
public:
- explicit ActiveFunctionsCollector(List<Handle<JSFunction> >* active_functions,
- Object* active_code_marker)
- : active_functions_(active_functions),
- active_code_marker_(active_code_marker) { }
-
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- CollectActiveFunctionsFromThread(isolate,
- top,
- active_functions_,
- active_code_marker_);
+ explicit RedirectActiveFunctions(SharedFunctionInfo* shared)
+ : shared_(shared) {
+ DCHECK(shared->HasDebugCode());
}
- private:
- List<Handle<JSFunction> >* active_functions_;
- Object* active_code_marker_;
-};
-
-
-class ActiveFunctionsRedirector : public ThreadVisitor {
- public:
void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- RedirectActivationsToRecompiledCodeOnThread(isolate, top);
- }
-};
+ for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ JSFunction* function = frame->function();
+ if (frame->is_optimized()) continue;
+ if (!function->Inlines(shared_)) continue;
+
+ Code* frame_code = frame->LookupCode();
+ DCHECK(frame_code->kind() == Code::FUNCTION);
+ if (frame_code->has_debug_break_slots()) continue;
+
+ Code* new_code = function->shared()->code();
+ Address old_pc = frame->pc();
+ Address new_pc = ComputeNewPcForRedirect(new_code, frame_code, old_pc);
+
+ if (FLAG_trace_deopt) {
+ PrintF("Replacing pc for debugging: %08" V8PRIxPTR " => %08" V8PRIxPTR
+ "\n",
+ reinterpret_cast<intptr_t>(old_pc),
+ reinterpret_cast<intptr_t>(new_pc));
+ }
+ if (FLAG_enable_embedded_constant_pool) {
+ // Update constant pool pointer for new code.
+ frame->set_constant_pool(new_code->constant_pool());
+ }
-static void EnsureFunctionHasDebugBreakSlots(Handle<JSFunction> function) {
- if (function->code()->kind() == Code::FUNCTION &&
- function->code()->has_debug_break_slots()) {
- // Nothing to do. Function code already had debug break slots.
- return;
- }
- // Make sure that the shared full code is compiled with debug
- // break slots.
- if (!function->shared()->code()->has_debug_break_slots()) {
- MaybeHandle<Code> code = Compiler::GetDebugCode(function);
- // Recompilation can fail. In that case leave the code as it was.
- if (!code.is_null()) function->ReplaceCode(*code.ToHandleChecked());
- } else {
- // Simply use shared code if it has debug break slots.
- function->ReplaceCode(function->shared()->code());
+ // Patch the return address to return into the code with
+ // debug break slots.
+ frame->set_pc(new_pc);
+ }
}
-}
+ private:
+ SharedFunctionInfo* shared_;
+ DisallowHeapAllocation no_gc_;
+};
-static void RecompileAndRelocateSuspendedGenerators(
- const List<Handle<JSGeneratorObject> > &generators) {
- for (int i = 0; i < generators.length(); i++) {
- Handle<JSFunction> fun(generators[i]->function());
- EnsureFunctionHasDebugBreakSlots(fun);
+bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
+ DCHECK(shared->is_compiled());
- int code_offset = generators[i]->continuation();
- int pc_offset = ComputePcOffsetFromCodeOffset(fun->code(), code_offset);
- generators[i]->set_continuation(pc_offset);
+ if (isolate_->concurrent_recompilation_enabled()) {
+ isolate_->optimizing_compile_dispatcher()->Flush();
}
-}
+ List<Handle<JSFunction> > functions;
+ List<Handle<JSGeneratorObject> > suspended_generators;
-static bool SkipSharedFunctionInfo(SharedFunctionInfo* shared,
- Object* active_code_marker) {
- if (!shared->allows_lazy_compilation()) return true;
- Object* script = shared->script();
- if (!script->IsScript()) return true;
- if (Script::cast(script)->type()->value() == Script::TYPE_NATIVE) return true;
- Code* shared_code = shared->code();
- return shared_code->gc_metadata() == active_code_marker;
-}
-
-
-static inline bool HasDebugBreakSlots(Code* code) {
- return code->kind() == Code::FUNCTION && code->has_debug_break_slots();
-}
-
-
-void Debug::PrepareForBreakPoints() {
- // If preparing for the first break point make sure to deoptimize all
- // functions as debugging does not work with optimized code.
- if (!has_break_points_) {
- if (isolate_->concurrent_recompilation_enabled()) {
- isolate_->optimizing_compile_dispatcher()->Flush();
- }
-
- Deoptimizer::DeoptimizeAll(isolate_);
-
- Handle<Code> lazy_compile = isolate_->builtins()->CompileLazy();
-
- // There will be at least one break point when we are done.
- has_break_points_ = true;
-
- // Keep the list of activated functions in a handlified list as it
- // is used both in GC and non-GC code.
- List<Handle<JSFunction> > active_functions(100);
-
- // A list of all suspended generators.
- List<Handle<JSGeneratorObject> > suspended_generators;
+ if (!shared->optimized_code_map()->IsSmi()) {
+ shared->ClearOptimizedCodeMap();
+ }
- // A list of all generator functions. We need to recompile all functions,
- // but we don't know until after visiting the whole heap which generator
- // functions have suspended activations and which do not. As in the case of
- // functions with activations on the stack, we need to be careful with
- // generator functions with suspended activations because although they
- // should be recompiled, recompilation can fail, and we need to avoid
- // leaving the heap in an inconsistent state.
- //
- // We could perhaps avoid this list and instead re-use the GC metadata
- // links.
- List<Handle<JSFunction> > generator_functions;
+ // Make sure we abort incremental marking.
+ isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "prepare for break points");
- {
- // We are going to iterate heap to find all functions without
- // debug break slots.
- Heap* heap = isolate_->heap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "preparing for breakpoints");
- HeapIterator iterator(heap);
-
- // Ensure no GC in this scope as we are going to use gc_metadata
- // field in the Code object to mark active functions.
- DisallowHeapAllocation no_allocation;
-
- Object* active_code_marker = heap->the_hole_value();
-
- CollectActiveFunctionsFromThread(isolate_,
- isolate_->thread_local_top(),
- &active_functions,
- active_code_marker);
- ActiveFunctionsCollector active_functions_collector(&active_functions,
- active_code_marker);
- isolate_->thread_manager()->IterateArchivedThreads(
- &active_functions_collector);
-
- // Scan the heap for all non-optimized functions which have no
- // debug break slots and are not active or inlined into an active
- // function and mark them for lazy compilation.
- HeapObject* obj = NULL;
- while (((obj = iterator.next()) != NULL)) {
- if (obj->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(obj);
- SharedFunctionInfo* shared = function->shared();
- if (SkipSharedFunctionInfo(shared, active_code_marker)) continue;
- if (shared->is_generator()) {
- generator_functions.Add(Handle<JSFunction>(function, isolate_));
- continue;
- }
- if (HasDebugBreakSlots(function->code())) continue;
- Code* fallback = HasDebugBreakSlots(shared->code()) ? shared->code()
- : *lazy_compile;
- Code::Kind kind = function->code()->kind();
- if (kind == Code::FUNCTION ||
- (kind == Code::BUILTIN && // Abort in-flight compilation.
- (function->IsInOptimizationQueue() ||
- function->IsMarkedForOptimization() ||
- function->IsMarkedForConcurrentOptimization()))) {
- function->ReplaceCode(fallback);
- }
- if (kind == Code::OPTIMIZED_FUNCTION) {
- // Optimized code can only get here if DeoptimizeAll did not
- // deoptimize turbo fan code.
- DCHECK(!FLAG_turbo_asm_deoptimization);
- DCHECK(function->shared()->asm_function());
- DCHECK(function->code()->is_turbofanned());
- function->ReplaceCode(fallback);
- }
- } else if (obj->IsJSGeneratorObject()) {
- JSGeneratorObject* gen = JSGeneratorObject::cast(obj);
- if (!gen->is_suspended()) continue;
-
- JSFunction* fun = gen->function();
- DCHECK_EQ(fun->code()->kind(), Code::FUNCTION);
- if (fun->code()->has_debug_break_slots()) continue;
-
- int pc_offset = gen->continuation();
- DCHECK_LT(0, pc_offset);
-
- int code_offset =
- ComputeCodeOffsetFromPcOffset(fun->code(), pc_offset);
-
- // This will be fixed after we recompile the functions.
- gen->set_continuation(code_offset);
-
- suspended_generators.Add(Handle<JSGeneratorObject>(gen, isolate_));
- } else if (obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
- if (SkipSharedFunctionInfo(shared, active_code_marker)) continue;
- if (shared->is_generator()) continue;
- if (HasDebugBreakSlots(shared->code())) continue;
- shared->ReplaceCode(*lazy_compile);
+ {
+ HeapIterator iterator(isolate_->heap());
+ HeapObject* obj;
+ bool include_generators = shared->is_generator();
+
+ while ((obj = iterator.next())) {
+ if (obj->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(obj);
+ if (!function->Inlines(*shared)) continue;
+ if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
+ Deoptimizer::DeoptimizeFunction(function);
}
- }
-
- // Clear gc_metadata field.
- for (int i = 0; i < active_functions.length(); i++) {
- Handle<JSFunction> function = active_functions[i];
- function->shared()->code()->set_gc_metadata(Smi::FromInt(0));
+ if (function->shared() == *shared) functions.Add(handle(function));
+ } else if (include_generators && obj->IsJSGeneratorObject()) {
+ JSGeneratorObject* generator_obj = JSGeneratorObject::cast(obj);
+ if (!generator_obj->is_suspended()) continue;
+ JSFunction* function = generator_obj->function();
+ if (!function->Inlines(*shared)) continue;
+ int pc_offset = generator_obj->continuation();
+ int index =
+ ComputeContinuationIndexFromPcOffset(function->code(), pc_offset);
+ generator_obj->set_continuation(index);
+ suspended_generators.Add(handle(generator_obj));
}
}
+ }
- // Recompile generator functions that have suspended activations, and
- // relocate those activations.
- RecompileAndRelocateSuspendedGenerators(suspended_generators);
-
- // Mark generator functions that didn't have suspended activations for lazy
- // recompilation. Note that this set does not include any active functions.
- for (int i = 0; i < generator_functions.length(); i++) {
- Handle<JSFunction> &function = generator_functions[i];
- if (function->code()->kind() != Code::FUNCTION) continue;
- if (function->code()->has_debug_break_slots()) continue;
- function->ReplaceCode(*lazy_compile);
- function->shared()->ReplaceCode(*lazy_compile);
- }
+ if (!shared->HasDebugCode()) {
+ DCHECK(functions.length() > 0);
+ if (!Compiler::CompileDebugCode(functions.first())) return false;
+ }
- // Now recompile all functions with activation frames and and
- // patch the return address to run in the new compiled code. It could be
- // that some active functions were recompiled already by the suspended
- // generator recompilation pass above; a generator with suspended
- // activations could also have active activations. That's fine.
- for (int i = 0; i < active_functions.length(); i++) {
- Handle<JSFunction> function = active_functions[i];
- Handle<SharedFunctionInfo> shared(function->shared());
- if (!shared->allows_lazy_compilation()) {
- // Ignore functions that cannot be recompiled. Fortunately, those are
- // only ones that are not subject to debugging in the first place.
- DCHECK(!function->IsSubjectToDebugging());
- continue;
- }
- if (shared->code()->kind() == Code::BUILTIN) continue;
+ for (Handle<JSFunction> const function : functions) {
+ function->ReplaceCode(shared->code());
+ }
- EnsureFunctionHasDebugBreakSlots(function);
- }
+ for (Handle<JSGeneratorObject> const generator_obj : suspended_generators) {
+ int index = generator_obj->continuation();
+ int pc_offset = ComputePcOffsetFromContinuationIndex(shared->code(), index);
+ generator_obj->set_continuation(pc_offset);
+ }
- RedirectActivationsToRecompiledCodeOnThread(isolate_,
- isolate_->thread_local_top());
+ // Update PCs on the stack to point to recompiled code.
+ RedirectActiveFunctions redirect_visitor(*shared);
+ redirect_visitor.VisitThread(isolate_, isolate_->thread_local_top());
+ isolate_->thread_manager()->IterateArchivedThreads(&redirect_visitor);
- ActiveFunctionsRedirector active_functions_redirector;
- isolate_->thread_manager()->IterateArchivedThreads(
- &active_functions_redirector);
- }
+ return true;
}
@@ -1947,10 +1450,12 @@ class SharedFunctionInfoFinder {
if (current_candidate_ != NULL) {
if (current_start_position_ == start_position &&
shared->end_position() == current_candidate_->end_position()) {
+ // If we already have a matching closure, do not throw it away.
+ if (current_candidate_closure_ != NULL && closure == NULL) return;
// If a top-level function contains only one function
// declaration the source for the top-level and the function
// is the same. In that case prefer the non top-level function.
- if (shared->is_toplevel()) return;
+ if (!current_candidate_->is_toplevel() && shared->is_toplevel()) return;
} else if (start_position < current_start_position_ ||
current_candidate_->end_position() < shared->end_position()) {
return;
@@ -1975,20 +1480,12 @@ class SharedFunctionInfoFinder {
};
-template <typename C>
-bool Debug::CompileToRevealInnerFunctions(C* compilable) {
- HandleScope scope(isolate_);
- // Force compiling inner functions that require context.
- // TODO(yangguo): remove this hack.
- bool has_break_points = has_break_points_;
- has_break_points_ = true;
- Handle<C> compilable_handle(compilable);
- bool result = !Compiler::GetUnoptimizedCode(compilable_handle).is_null();
- has_break_points_ = has_break_points;
- return result;
-}
-
-
+// We need to find a SFI for a literal that may not yet have been compiled yet,
+// and there may not be a JSFunction referencing it. Find the SFI closest to
+// the given position, compile it to reveal possible inner SFIs and repeat.
+// While we are at this, also ensure code with debug break slots so that we do
+// not have to compile a SFI without JSFunction, which is paifu for those that
+// cannot be compiled without context (need to find outer compilable SFI etc.)
Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
int position) {
while (true) {
@@ -2008,19 +1505,20 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
}
shared = finder.Result();
if (shared == NULL) break;
- // We found it if it's already compiled.
- if (shared->is_compiled()) return handle(shared);
+ // We found it if it's already compiled and has debug code.
+ if (shared->HasDebugCode()) return handle(shared);
}
// If not, compile to reveal inner functions, if possible.
if (shared->allows_lazy_compilation_without_context()) {
- if (!CompileToRevealInnerFunctions(shared)) break;
+ HandleScope scope(isolate_);
+ if (!Compiler::CompileDebugCode(handle(shared))) break;
continue;
}
// If not possible, comb the heap for the best suitable compile target.
JSFunction* closure;
{
- HeapIterator it(isolate_->heap(), HeapIterator::kNoFiltering);
+ HeapIterator it(isolate_->heap());
SharedFunctionInfoFinder finder(position);
while (HeapObject* object = it.next()) {
JSFunction* candidate_closure = NULL;
@@ -2041,9 +1539,11 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
closure = finder.ResultClosure();
shared = finder.Result();
}
- if (closure == NULL ? !CompileToRevealInnerFunctions(shared)
- : !CompileToRevealInnerFunctions(closure)) {
- break;
+ HandleScope scope(isolate_);
+ if (closure == NULL) {
+ if (!Compiler::CompileDebugCode(handle(shared))) break;
+ } else {
+ if (!Compiler::CompileDebugCode(handle(closure))) break;
}
}
return isolate_->factory()->undefined_value();
@@ -2053,25 +1553,27 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// Ensures the debug information is present for shared.
bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
Handle<JSFunction> function) {
- Isolate* isolate = shared->GetIsolate();
+ if (!shared->IsSubjectToDebugging()) return false;
// Return if we already have the debug info for shared.
- if (HasDebugInfo(shared)) {
- DCHECK(shared->is_compiled());
- return true;
- }
-
- // There will be at least one break point when we are done.
- has_break_points_ = true;
+ if (shared->HasDebugInfo()) return true;
- // Ensure function is compiled. Return false if this failed.
- if (!function.is_null() &&
- !Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
+ if (function.is_null()) {
+ DCHECK(shared->HasDebugCode());
+ } else if (!Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
return false;
}
+ if (!PrepareFunctionForBreakPoints(shared)) return false;
+
+ // Make sure IC state is clean. This is so that we correctly flood
+ // accessor pairs when stepping in.
+ shared->code()->ClearInlineCaches();
+ shared->feedback_vector()->ClearICSlots(*shared);
+
// Create the debug info object.
- Handle<DebugInfo> debug_info = isolate->factory()->NewDebugInfo(shared);
+ DCHECK(shared->HasDebugCode());
+ Handle<DebugInfo> debug_info = isolate_->factory()->NewDebugInfo(shared);
// Add debug info to the list.
DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
@@ -2082,177 +1584,55 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
}
-void Debug::RemoveDebugInfo(DebugInfoListNode* prev, DebugInfoListNode* node) {
- // Unlink from list. If prev is NULL we are looking at the first element.
- if (prev == NULL) {
- debug_info_list_ = node->next();
- } else {
- prev->set_next(node->next());
- }
- delete node;
-
- // If there are no more debug info objects there are not more break
- // points.
- has_break_points_ = debug_info_list_ != NULL;
-}
-
+void Debug::RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info) {
+ HandleScope scope(isolate_);
+ Handle<SharedFunctionInfo> shared(debug_info->shared());
-void Debug::RemoveDebugInfo(DebugInfo** debug_info) {
- DCHECK(debug_info_list_ != NULL);
+ DCHECK_NOT_NULL(debug_info_list_);
// Run through the debug info objects to find this one and remove it.
DebugInfoListNode* prev = NULL;
DebugInfoListNode* current = debug_info_list_;
while (current != NULL) {
- if (current->debug_info().location() == debug_info) {
- RemoveDebugInfo(prev, current);
+ if (current->debug_info().is_identical_to(debug_info)) {
+ // Unlink from list. If prev is NULL we are looking at the first element.
+ if (prev == NULL) {
+ debug_info_list_ = current->next();
+ } else {
+ prev->set_next(current->next());
+ }
+ delete current;
+ shared->set_debug_info(isolate_->heap()->undefined_value());
return;
}
// Move to next in list.
prev = current;
current = current->next();
}
- UNREACHABLE();
-}
-
-void Debug::RemoveDebugInfo(DebugInfoListNode* node) {
- DCHECK(debug_info_list_ != NULL);
- // Run through the debug info objects to find this one and remove it.
- DebugInfoListNode* prev = NULL;
- DebugInfoListNode* current = debug_info_list_;
- while (current != NULL) {
- if (current == node) {
- RemoveDebugInfo(prev, node);
- return;
- }
- // Move to next in list.
- prev = current;
- current = current->next();
- }
UNREACHABLE();
}
-void Debug::RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info) {
- HandleScope scope(isolate_);
- Handle<SharedFunctionInfo> shared(debug_info->shared());
-
- RemoveDebugInfo(debug_info.location());
-
- shared->set_debug_info(isolate_->heap()->undefined_value());
-}
-
-
void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
after_break_target_ = NULL;
if (LiveEdit::SetAfterBreakTarget(this)) return; // LiveEdit did the job.
- HandleScope scope(isolate_);
- PrepareForBreakPoints();
-
- // Get the executing function in which the debug break occurred.
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
- Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared, function)) {
- // Return if we failed to retrieve the debug info.
- return;
- }
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
- Handle<Code> code(debug_info->code());
- Handle<Code> original_code(debug_info->original_code());
-#ifdef DEBUG
- // Get the code which is actually executing.
- Handle<Code> frame_code(frame->LookupCode());
- DCHECK(frame_code.is_identical_to(code));
-#endif
-
- // Find the call address in the running code. This address holds the call to
- // either a DebugBreakXXX or to the debug break return entry code if the
- // break point is still active after processing the break point.
- Address addr = Assembler::break_address_from_return_address(frame->pc());
-
- // Check if the location is at JS exit or debug break slot.
- bool at_js_return = false;
- bool break_at_js_return_active = false;
- bool at_debug_break_slot = false;
- RelocIterator it(debug_info->code());
- while (!it.done() && !at_js_return && !at_debug_break_slot) {
- if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) {
- at_js_return = (it.rinfo()->pc() ==
- addr - Assembler::kPatchReturnSequenceAddressOffset);
- break_at_js_return_active = it.rinfo()->IsPatchedReturnSequence();
- }
- if (RelocInfo::IsDebugBreakSlot(it.rinfo()->rmode())) {
- at_debug_break_slot = (it.rinfo()->pc() ==
- addr - Assembler::kPatchDebugBreakSlotAddressOffset);
- }
- it.next();
- }
-
- // Handle the jump to continue execution after break point depending on the
- // break location.
- if (at_js_return) {
- // If the break point at return is still active jump to the corresponding
- // place in the original code. If not the break point was removed during
- // break point processing.
- if (break_at_js_return_active) {
- addr += original_code->instruction_start() - code->instruction_start();
- }
-
- // Move back to where the call instruction sequence started.
- after_break_target_ = addr - Assembler::kPatchReturnSequenceAddressOffset;
- } else if (at_debug_break_slot) {
- // Address of where the debug break slot starts.
- addr = addr - Assembler::kPatchDebugBreakSlotAddressOffset;
-
- // Continue just after the slot.
- after_break_target_ = addr + Assembler::kDebugBreakSlotLength;
- } else {
- addr = Assembler::target_address_from_return_address(frame->pc());
- if (IsDebugBreak(Assembler::target_address_at(addr, *code))) {
- // We now know that there is still a debug break call at the target
- // address, so the break point is still there and the original code will
- // hold the address to jump to in order to complete the call which is
- // replaced by a call to DebugBreakXXX.
-
- // Find the corresponding address in the original code.
- addr += original_code->instruction_start() - code->instruction_start();
-
- // Install jump to the call address in the original code. This will be the
- // call which was overwritten by the call to DebugBreakXXX.
- after_break_target_ = Assembler::target_address_at(addr, *original_code);
- } else {
- // There is no longer a break point present. Don't try to look in the
- // original code as the running code will have the right address. This
- // takes care of the case where the last break point is removed from the
- // function and therefore no "original code" is available.
- after_break_target_ = Assembler::target_address_at(addr, *code);
- }
- }
+ // Continue just after the slot.
+ after_break_target_ = frame->pc();
}
bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
HandleScope scope(isolate_);
- // If there are no break points this cannot be break at return, as
- // the debugger statement and stack guard debug break cannot be at
- // return.
- if (!has_break_points_) {
- return false;
- }
-
- PrepareForBreakPoints();
-
// Get the executing function in which the debug break occurred.
Handle<JSFunction> function(JSFunction::cast(frame->function()));
Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared, function)) {
- // Return if we failed to retrieve the debug info.
- return false;
- }
- Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+
+ // With no debug info there are no break points, so we can't be at a return.
+ if (!shared->HasDebugInfo()) return false;
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo());
Handle<Code> code(debug_info->code());
#ifdef DEBUG
// Get the code which is actually executing.
@@ -2260,17 +1640,11 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
DCHECK(frame_code.is_identical_to(code));
#endif
- // Find the call address in the running code.
- Address addr = Assembler::break_address_from_return_address(frame->pc());
-
- // Check if the location is at JS return.
- RelocIterator it(debug_info->code());
- while (!it.done()) {
- if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) {
- return (it.rinfo()->pc() ==
- addr - Assembler::kPatchReturnSequenceAddressOffset);
- }
- it.next();
+ // Find the reloc info matching the start of the debug break slot.
+ Address slot_pc = frame->pc() - Assembler::kDebugBreakSlotLength;
+ int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
+ for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+ if (it.rinfo()->pc() == slot_pc) return true;
}
return false;
}
@@ -2296,15 +1670,7 @@ bool Debug::IsDebugGlobal(GlobalObject* global) {
void Debug::ClearMirrorCache() {
PostponeInterruptsScope postpone(isolate_);
HandleScope scope(isolate_);
- AssertDebugContext();
- Factory* factory = isolate_->factory();
- Handle<GlobalObject> global(isolate_->global_object());
- JSObject::SetProperty(global,
- factory->NewStringFromAsciiChecked("next_handle_"),
- handle(Smi::FromInt(0), isolate_), SLOPPY).Check();
- JSObject::SetProperty(global,
- factory->NewStringFromAsciiChecked("mirror_cache_"),
- factory->NewJSArray(0, FAST_ELEMENTS), SLOPPY).Check();
+ CallFunction("ClearMirrorCache", 0, NULL);
}
@@ -2323,6 +1689,44 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
}
+void Debug::GetStepinPositions(JavaScriptFrame* frame, StackFrame::Id frame_id,
+ List<int>* results_out) {
+ FrameSummary summary = GetFirstFrameSummary(frame);
+
+ Handle<JSFunction> fun = Handle<JSFunction>(summary.function());
+ Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>(fun->shared());
+
+ if (!EnsureDebugInfo(shared, fun)) return;
+
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+ // Refresh frame summary if the code has been recompiled for debugging.
+ if (shared->code() != *summary.code()) summary = GetFirstFrameSummary(frame);
+
+ // Find range of break points starting from the break point where execution
+ // has stopped.
+ Address call_pc = summary.pc() - 1;
+ List<BreakLocation> locations;
+ BreakLocation::FromAddressSameStatement(debug_info, ALL_BREAK_LOCATIONS,
+ call_pc, &locations);
+
+ for (BreakLocation location : locations) {
+ if (location.pc() <= summary.pc()) {
+ // The break point is near our pc. Could be a step-in possibility,
+ // that is currently taken by active debugger call.
+ if (break_frame_id() == StackFrame::NO_ID) {
+ continue; // We are not stepping.
+ } else {
+ JavaScriptFrameIterator frame_it(isolate_, break_frame_id());
+ // If our frame is a top frame and we are stepping, we can do step-in
+ // at this place.
+ if (frame_it.frame()->id() != frame_id) continue;
+ }
+ }
+ if (location.IsStepInLocation()) results_out->Add(location.position());
+ }
+}
+
+
void Debug::RecordEvalCaller(Handle<Script> script) {
script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
// For eval scripts add information on the function from which eval was
@@ -2338,29 +1742,10 @@ void Debug::RecordEvalCaller(Handle<Script> script) {
}
-MaybeHandle<Object> Debug::MakeJSObject(const char* constructor_name,
- int argc,
- Handle<Object> argv[]) {
- AssertDebugContext();
- // Create the execution state object.
- Handle<GlobalObject> global(isolate_->global_object());
- Handle<Object> constructor = Object::GetProperty(
- isolate_, global, constructor_name).ToHandleChecked();
- DCHECK(constructor->IsJSFunction());
- if (!constructor->IsJSFunction()) return MaybeHandle<Object>();
- // We do not handle interrupts here. In particular, termination interrupts.
- PostponeInterruptsScope no_interrupts(isolate_);
- return Execution::TryCall(Handle<JSFunction>::cast(constructor),
- handle(debug_context()->global_proxy()),
- argc,
- argv);
-}
-
-
MaybeHandle<Object> Debug::MakeExecutionState() {
// Create the execution state object.
Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()) };
- return MakeJSObject("MakeExecutionState", arraysize(argv), argv);
+ return CallFunction("MakeExecutionState", arraysize(argv), argv);
}
@@ -2368,7 +1753,7 @@ MaybeHandle<Object> Debug::MakeBreakEvent(Handle<Object> break_points_hit) {
// Create the new break event object.
Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()),
break_points_hit };
- return MakeJSObject("MakeBreakEvent", arraysize(argv), argv);
+ return CallFunction("MakeBreakEvent", arraysize(argv), argv);
}
@@ -2380,7 +1765,7 @@ MaybeHandle<Object> Debug::MakeExceptionEvent(Handle<Object> exception,
exception,
isolate_->factory()->ToBoolean(uncaught),
promise };
- return MakeJSObject("MakeExceptionEvent", arraysize(argv), argv);
+ return CallFunction("MakeExceptionEvent", arraysize(argv), argv);
}
@@ -2390,21 +1775,21 @@ MaybeHandle<Object> Debug::MakeCompileEvent(Handle<Script> script,
Handle<Object> script_wrapper = Script::GetWrapper(script);
Handle<Object> argv[] = { script_wrapper,
isolate_->factory()->NewNumberFromInt(type) };
- return MakeJSObject("MakeCompileEvent", arraysize(argv), argv);
+ return CallFunction("MakeCompileEvent", arraysize(argv), argv);
}
MaybeHandle<Object> Debug::MakePromiseEvent(Handle<JSObject> event_data) {
// Create the promise event object.
Handle<Object> argv[] = { event_data };
- return MakeJSObject("MakePromiseEvent", arraysize(argv), argv);
+ return CallFunction("MakePromiseEvent", arraysize(argv), argv);
}
MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<JSObject> task_event) {
// Create the async task event object.
Handle<Object> argv[] = { task_event };
- return MakeJSObject("MakeAsyncTaskEvent", arraysize(argv), argv);
+ return CallFunction("MakeAsyncTaskEvent", arraysize(argv), argv);
}
@@ -2438,10 +1823,7 @@ void Debug::OnPromiseReject(Handle<JSObject> promise, Handle<Object> value) {
MaybeHandle<Object> Debug::PromiseHasUserDefinedRejectHandler(
Handle<JSObject> promise) {
- Handle<JSFunction> fun = Handle<JSFunction>::cast(
- JSReceiver::GetDataProperty(isolate_->js_builtins_object(),
- isolate_->factory()->NewStringFromStaticChars(
- "$promiseHasUserDefinedRejectHandler")));
+ Handle<JSFunction> fun = isolate_->promise_has_user_defined_reject_handler();
return Execution::Call(isolate_, fun, promise, 0, NULL);
}
@@ -2493,6 +1875,7 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
void Debug::OnCompileError(Handle<Script> script) {
if (ignore_events()) return;
+ SuppressDebug while_processing(this);
if (in_debug_scope()) {
ProcessCompileEventInDebugScope(v8::CompileError, script);
@@ -2535,6 +1918,7 @@ void Debug::OnDebugBreak(Handle<Object> break_points_hit,
void Debug::OnBeforeCompile(Handle<Script> script) {
if (in_debug_scope() || ignore_events()) return;
+ SuppressDebug while_processing(this);
HandleScope scope(isolate_);
DebugScope debug_scope(this);
@@ -2559,6 +1943,7 @@ void Debug::OnAfterCompile(Handle<Script> script) {
if (script_cache_ != NULL) script_cache_->Add(script);
if (ignore_events()) return;
+ SuppressDebug while_processing(this);
if (in_debug_scope()) {
ProcessCompileEventInDebugScope(v8::AfterCompile, script);
@@ -2571,30 +1956,9 @@ void Debug::OnAfterCompile(Handle<Script> script) {
// If debugging there might be script break points registered for this
// script. Make sure that these break points are set.
-
- // Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
- Handle<String> update_script_break_points_string =
- isolate_->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("UpdateScriptBreakPoints"));
- Handle<GlobalObject> debug_global(debug_context()->global_object());
- Handle<Object> update_script_break_points =
- Object::GetProperty(
- debug_global, update_script_break_points_string).ToHandleChecked();
- if (!update_script_break_points->IsJSFunction()) {
- return;
- }
- DCHECK(update_script_break_points->IsJSFunction());
-
- // Wrap the script object in a proper JS object before passing it
- // to JavaScript.
- Handle<Object> wrapper = Script::GetWrapper(script);
-
- // Call UpdateScriptBreakPoints expect no exceptions.
- Handle<Object> argv[] = { wrapper };
- if (Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
- isolate_->js_builtins_object(),
- arraysize(argv),
- argv).is_null()) {
+ Handle<Object> argv[] = {Script::GetWrapper(script)};
+ if (CallFunction("UpdateScriptBreakPoints", arraysize(argv), argv)
+ .is_null()) {
return;
}
@@ -2676,6 +2040,9 @@ void Debug::CallEventCallback(v8::DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
v8::Debug::ClientData* client_data) {
+ // Prevent other interrupts from triggering, for example API callbacks,
+ // while dispatching event listners.
+ PostponeInterruptsScope postpone(isolate_);
bool previous = in_debug_event_listener_;
in_debug_event_listener_ = true;
if (event_listener_->IsForeign()) {
@@ -2709,7 +2076,6 @@ void Debug::ProcessCompileEventInDebugScope(v8::DebugEvent event,
Handle<Script> script) {
if (event_listener_.is_null()) return;
- SuppressDebug while_processing(this);
DebugScope debug_scope(this);
if (debug_scope.failed()) return;
@@ -2990,19 +2356,8 @@ void Debug::HandleDebugBreak() {
bool debug_command_only = isolate_->stack_guard()->CheckDebugCommand() &&
!isolate_->stack_guard()->CheckDebugBreak();
- bool is_debugger_statement = !isolate_->stack_guard()->CheckDebugCommand() &&
- !isolate_->stack_guard()->CheckDebugBreak();
-
isolate_->stack_guard()->ClearDebugBreak();
- if (is_debugger_statement) {
- // If we have been called via 'debugger' Javascript statement,
- // we might not be prepared for breakpoints.
- // TODO(dslomov,yangguo): CheckDebugBreak may race with RequestDebugBreak.
- // Revisit this to clean-up.
- HandleScope handle_scope(isolate_);
- PrepareForBreakPoints();
- }
ProcessDebugMessages(debug_command_only);
}
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug/debug.h
index fd5f67d6f5..4b5b7b7b90 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -2,20 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DEBUG_H_
-#define V8_DEBUG_H_
+#ifndef V8_DEBUG_DEBUG_H_
+#define V8_DEBUG_DEBUG_H_
#include "src/allocation.h"
#include "src/arguments.h"
#include "src/assembler.h"
#include "src/base/atomicops.h"
#include "src/base/platform/platform.h"
+#include "src/debug/liveedit.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/flags.h"
-#include "src/frames-inl.h"
+#include "src/frames.h"
#include "src/hashmap.h"
-#include "src/liveedit.h"
+#include "src/runtime/runtime.h"
#include "src/string-stream.h"
#include "src/v8threads.h"
@@ -51,15 +52,11 @@ enum ExceptionBreakType {
// Type of exception break.
-enum BreakLocatorType {
- ALL_BREAK_LOCATIONS = 0,
- SOURCE_BREAK_LOCATIONS = 1,
- CALLS_AND_RETURNS = 2
-};
+enum BreakLocatorType { ALL_BREAK_LOCATIONS, CALLS_AND_RETURNS };
// The different types of breakpoint position alignments.
-// Must match Debug.BreakPositionAlignment in debug-debugger.js
+// Must match Debug.BreakPositionAlignment in debug.js
enum BreakPositionAlignment {
STATEMENT_ALIGNED = 0,
BREAK_POSITION_ALIGNED = 1
@@ -82,14 +79,20 @@ class BreakLocation {
BreakPositionAlignment alignment);
bool IsDebugBreak() const;
- inline bool IsExit() const { return RelocInfo::IsJSReturn(rmode_); }
+
+ inline bool IsReturn() const {
+ return RelocInfo::IsDebugBreakSlotAtReturn(rmode_);
+ }
+ inline bool IsCall() const {
+ return RelocInfo::IsDebugBreakSlotAtCall(rmode_);
+ }
inline bool IsConstructCall() const {
- return RelocInfo::IsConstructCall(rmode_);
+ return RelocInfo::IsDebugBreakSlotAtConstructCall(rmode_);
+ }
+ inline int CallArgumentsCount() const {
+ DCHECK(IsCall());
+ return RelocInfo::DebugBreakCallArgumentsCount(data_);
}
- inline bool IsCodeTarget() const { return RelocInfo::IsCodeTarget(rmode_); }
-
- Handle<Code> CodeTarget() const;
- Handle<Code> OriginalCodeTarget() const;
bool IsStepInLocation() const;
inline bool HasBreakPoint() const {
@@ -104,32 +107,22 @@ class BreakLocation {
void SetOneShot();
void ClearOneShot();
+
inline RelocInfo rinfo() const {
return RelocInfo(pc(), rmode(), data_, code());
}
- inline RelocInfo original_rinfo() const {
- return RelocInfo(original_pc(), original_rmode(), original_data_,
- original_code());
- }
-
inline int position() const { return position_; }
inline int statement_position() const { return statement_position_; }
inline Address pc() const { return code()->entry() + pc_offset_; }
- inline Address original_pc() const {
- return original_code()->entry() + original_pc_offset_;
- }
inline RelocInfo::Mode rmode() const { return rmode_; }
- inline RelocInfo::Mode original_rmode() const { return original_rmode_; }
inline Code* code() const { return debug_info_->code(); }
- inline Code* original_code() const { return debug_info_->original_code(); }
private:
- BreakLocation(Handle<DebugInfo> debug_info, RelocInfo* rinfo,
- RelocInfo* original_rinfo, int position,
+ BreakLocation(Handle<DebugInfo> debug_info, RelocInfo* rinfo, int position,
int statement_position);
class Iterator {
@@ -137,11 +130,11 @@ class BreakLocation {
Iterator(Handle<DebugInfo> debug_info, BreakLocatorType type);
BreakLocation GetBreakLocation() {
- return BreakLocation(debug_info_, rinfo(), original_rinfo(), position(),
+ return BreakLocation(debug_info_, rinfo(), position(),
statement_position());
}
- inline bool Done() const { return RinfoDone(); }
+ inline bool Done() const { return reloc_iterator_.done(); }
void Next();
void SkipTo(int count) {
@@ -149,31 +142,17 @@ class BreakLocation {
}
inline RelocInfo::Mode rmode() { return reloc_iterator_.rinfo()->rmode(); }
- inline RelocInfo::Mode original_rmode() {
- return reloc_iterator_.rinfo()->rmode();
- }
-
inline RelocInfo* rinfo() { return reloc_iterator_.rinfo(); }
- inline RelocInfo* original_rinfo() {
- return reloc_iterator_original_.rinfo();
- }
-
inline Address pc() { return rinfo()->pc(); }
- inline Address original_pc() { return original_rinfo()->pc(); }
-
int break_index() const { return break_index_; }
-
inline int position() const { return position_; }
inline int statement_position() const { return statement_position_; }
private:
- bool RinfoDone() const;
- void RinfoNext();
+ static int GetModeMask(BreakLocatorType type);
Handle<DebugInfo> debug_info_;
- BreakLocatorType type_;
RelocIterator reloc_iterator_;
- RelocIterator reloc_iterator_original_;
int break_index_;
int position_;
int statement_position_;
@@ -188,13 +167,8 @@ class BreakLocation {
static int BreakIndexFromAddress(Handle<DebugInfo> debug_info,
BreakLocatorType type, Address pc);
- void ClearDebugBreak();
- void RestoreFromOriginal(int length_in_bytes);
-
void SetDebugBreak();
- void SetDebugBreakAtReturn();
- void SetDebugBreakAtSlot();
- void SetDebugBreakAtIC();
+ void ClearDebugBreak();
inline bool IsDebuggerStatement() const {
return RelocInfo::IsDebuggerStatement(rmode_);
@@ -205,11 +179,8 @@ class BreakLocation {
Handle<DebugInfo> debug_info_;
int pc_offset_;
- int original_pc_offset_;
RelocInfo::Mode rmode_;
- RelocInfo::Mode original_rmode_;
intptr_t data_;
- intptr_t original_data_;
int position_;
int statement_position_;
};
@@ -243,14 +214,12 @@ class ScriptCache {
class DebugInfoListNode {
public:
explicit DebugInfoListNode(DebugInfo* debug_info);
- virtual ~DebugInfoListNode() { ClearInfo(); }
+ ~DebugInfoListNode();
DebugInfoListNode* next() { return next_; }
void set_next(DebugInfoListNode* next) { next_ = next; }
Handle<DebugInfo> debug_info() { return Handle<DebugInfo>(debug_info_); }
- void ClearInfo();
-
private:
// Global (weak) handle to the debug info object.
DebugInfo** debug_info_;
@@ -468,8 +437,10 @@ class Debug {
void HandleStepIn(Handle<Object> function_obj, bool is_constructor);
bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
- // Purge all code objects that have no debug break slots.
- void PrepareForBreakPoints();
+ void GetStepinPositions(JavaScriptFrame* frame, StackFrame::Id frame_id,
+ List<int>* results_out);
+
+ bool PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared);
// Returns whether the operation succeeded. Compilation can only be triggered
// if a valid closure is passed as the second argument, otherwise the shared
@@ -477,7 +448,6 @@ class Debug {
bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
Handle<JSFunction> function);
static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
- static bool HasDebugInfo(Handle<SharedFunctionInfo> shared);
template <typename C>
bool CompileToRevealInnerFunctions(C* compilable);
@@ -504,10 +474,6 @@ class Debug {
LiveEdit::FrameDropMode mode,
Object** restarter_frame_function_pointer);
- // Passed to MakeWeak.
- static void HandlePhantomDebugInfo(
- const PhantomCallbackData<DebugInfoListNode>& data);
-
// Threading support.
char* ArchiveDebug(char* to);
char* RestoreDebug(char* from);
@@ -528,6 +494,7 @@ class Debug {
base::NoBarrier_Load(&thread_local_.current_debug_scope_));
}
inline Handle<Context> debug_context() { return debug_context_; }
+
void set_live_edit_enabled(bool v) { live_edit_enabled_ = v; }
bool live_edit_enabled() const {
return FLAG_enable_liveedit && live_edit_enabled_ ;
@@ -535,7 +502,6 @@ class Debug {
inline bool is_active() const { return is_active_; }
inline bool is_loaded() const { return !debug_context_.is_null(); }
- inline bool has_break_points() const { return has_break_points_; }
inline bool in_debug_scope() const {
return !!base::NoBarrier_Load(&thread_local_.current_debug_scope_);
}
@@ -583,10 +549,6 @@ class Debug {
void OnException(Handle<Object> exception, Handle<Object> promise);
// Constructors for debug event objects.
- MUST_USE_RESULT MaybeHandle<Object> MakeJSObject(
- const char* constructor_name,
- int argc,
- Handle<Object> argv[]);
MUST_USE_RESULT MaybeHandle<Object> MakeExecutionState();
MUST_USE_RESULT MaybeHandle<Object> MakeBreakEvent(
Handle<Object> break_points_hit);
@@ -622,18 +584,16 @@ class Debug {
bool auto_continue);
void InvokeMessageHandler(MessageImpl message);
- static bool CompileDebuggerScript(Isolate* isolate, int index);
void ClearOneShot();
- void ActivateStepIn(Handle<JSFunction> function, StackFrame* frame);
+ void ActivateStepIn(StackFrame* frame);
void ClearStepIn();
void ActivateStepOut(StackFrame* frame);
void ClearStepNext();
void RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info);
- void RemoveDebugInfo(DebugInfo** debug_info);
- void RemoveDebugInfo(DebugInfoListNode* node);
- void RemoveDebugInfo(DebugInfoListNode* prev, DebugInfoListNode* node);
Handle<Object> CheckBreakPoints(Handle<Object> break_point);
bool CheckBreakPoint(Handle<Object> break_point_object);
+ MaybeHandle<Object> CallFunction(const char* name, int argc,
+ Handle<Object> args[]);
inline void AssertDebugContext() {
DCHECK(isolate_->context() == *debug_context());
@@ -735,9 +695,6 @@ class Debug {
};
-DECLARE_RUNTIME_FUNCTION(Debug_Break);
-
-
// This scope is used to load and enter the debug context and create a new
// break state. Leaving the scope will restore the previous state.
// On failure to load, FailedToEnter returns true.
@@ -806,18 +763,14 @@ class SuppressDebug BASE_EMBEDDED {
// Code generator routines.
class DebugCodegen : public AllStatic {
public:
- static void GenerateSlot(MacroAssembler* masm);
- static void GenerateCallICStubDebugBreak(MacroAssembler* masm);
- static void GenerateLoadICDebugBreak(MacroAssembler* masm);
- static void GenerateStoreICDebugBreak(MacroAssembler* masm);
- static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
- static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
- static void GenerateCompareNilICDebugBreak(MacroAssembler* masm);
- static void GenerateReturnDebugBreak(MacroAssembler* masm);
- static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
- static void GenerateCallConstructStubDebugBreak(MacroAssembler* masm);
- static void GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm);
- static void GenerateSlotDebugBreak(MacroAssembler* masm);
+ enum DebugBreakCallHelperMode {
+ SAVE_RESULT_REGISTER,
+ IGNORE_RESULT_REGISTER
+ };
+
+ static void GenerateDebugBreakStub(MacroAssembler* masm,
+ DebugBreakCallHelperMode mode);
+
static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
// FrameDropper is a code replacement for a JavaScript frame with possibly
@@ -825,9 +778,16 @@ class DebugCodegen : public AllStatic {
// There is no calling conventions here, because it never actually gets
// called, it only gets returned to.
static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
+
+
+ static void GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
+ int call_argc = -1);
+
+ static void PatchDebugBreakSlot(Address pc, Handle<Code> code);
+ static void ClearDebugBreakSlot(Address pc);
};
} } // namespace v8::internal
-#endif // V8_DEBUG_H_
+#endif // V8_DEBUG_DEBUG_H_
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug/debug.js
index 991af118a2..2e51d43088 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug/debug.js
@@ -1,8 +1,40 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+
+(function (global, utils) {
"use strict";
+// ----------------------------------------------------------------------------
+// Imports
+
+var FrameMirror = global.FrameMirror;
+var GlobalArray = global.Array;
+var GlobalRegExp = global.RegExp;
+var IsNaN = global.isNaN;
+var JSONParse = global.JSON.parse;
+var JSONStringify = global.JSON.stringify;
+var LookupMirror = global.LookupMirror;
+var MakeMirror = global.MakeMirror;
+var MakeMirrorSerializer = global.MakeMirrorSerializer;
+var MathMin = global.Math.min;
+var Mirror = global.Mirror;
+var MirrorType;
+var ParseInt = global.parseInt;
+var ToBoolean;
+var ToNumber;
+var ToString;
+var ValueMirror = global.ValueMirror;
+
+utils.Import(function(from) {
+ MirrorType = from.MirrorType;
+ ToBoolean = from.ToBoolean;
+ ToNumber = from.ToNumber;
+ ToString = from.ToString;
+});
+
+//----------------------------------------------------------------------------
+
// Default number of frames to include in the response to backtrace request.
var kDefaultBacktraceLength = 10;
@@ -202,8 +234,7 @@ BreakPoint.prototype.isTriggered = function(exec_state) {
try {
var mirror = exec_state.frame(0).evaluate(this.condition());
// If no sensible mirror or non true value break point not triggered.
- if (!(mirror instanceof ValueMirror) ||
- !builtins.$toBoolean(mirror.value_)) {
+ if (!(mirror instanceof ValueMirror) || !ToBoolean(mirror.value_)) {
return false;
}
} catch (e) {
@@ -247,9 +278,9 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
} else if (type == Debug.ScriptBreakPointType.ScriptName) {
this.script_name_ = script_id_or_name;
} else if (type == Debug.ScriptBreakPointType.ScriptRegExp) {
- this.script_regexp_object_ = new RegExp(script_id_or_name);
+ this.script_regexp_object_ = new GlobalRegExp(script_id_or_name);
} else {
- throw new Error("Unexpected breakpoint type " + type);
+ throw MakeError(kDebugger, "Unexpected breakpoint type " + type);
}
this.line_ = opt_line || 0;
this.column_ = opt_column;
@@ -396,7 +427,7 @@ ScriptBreakPoint.prototype.matchesScript = function(script) {
} else if (this.type_ == Debug.ScriptBreakPointType.ScriptRegExp) {
return this.script_regexp_object_.test(script.nameOrSourceURL());
} else {
- throw new Error("Unexpected breakpoint type " + this.type_);
+ throw MakeError(kDebugger, "Unexpected breakpoint type " + this.type_);
}
}
};
@@ -414,7 +445,7 @@ ScriptBreakPoint.prototype.set = function (script) {
// Allocate array for caching the columns where the actual source starts.
if (!script.sourceColumnStart_) {
- script.sourceColumnStart_ = new Array(script.lineCount());
+ script.sourceColumnStart_ = new GlobalArray(script.lineCount());
}
// Fill cache if needed and get column where the actual source starts.
@@ -493,14 +524,14 @@ function GetScriptBreakPoints(script) {
Debug.setListener = function(listener, opt_data) {
if (!IS_FUNCTION(listener) && !IS_UNDEFINED(listener) && !IS_NULL(listener)) {
- throw new Error('Parameters have wrong types.');
+ throw MakeTypeError(kDebuggerType);
}
%SetDebugEventListener(listener, opt_data);
};
Debug.breakLocations = function(f, opt_position_aligment) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+ if (!IS_FUNCTION(f)) throw MakeTypeError(kDebuggerType);
var position_aligment = IS_UNDEFINED(opt_position_aligment)
? Debug.BreakPositionAlignment.Statement : opt_position_aligment;
return %GetBreakLocations(f, position_aligment);
@@ -533,7 +564,7 @@ Debug.findScript = function(func_or_script_name) {
if (result_count == 1) {
return last_result;
} else {
- return undefined;
+ return UNDEFINED;
}
} else {
return %GetScript(func_or_script_name);
@@ -550,13 +581,13 @@ Debug.scriptSource = function(func_or_script_name) {
Debug.source = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+ if (!IS_FUNCTION(f)) throw MakeTypeError(kDebuggerType);
return %FunctionGetSourceCode(f);
};
Debug.sourcePosition = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+ if (!IS_FUNCTION(f)) throw MakeTypeError(kDebuggerType);
return %FunctionGetScriptSourcePosition(f);
};
@@ -610,10 +641,10 @@ Debug.findBreakPointActualLocations = function(break_point_number) {
};
Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
- if (!IS_FUNCTION(func)) throw new Error('Parameters have wrong types.');
+ if (!IS_FUNCTION(func)) throw MakeTypeError(kDebuggerType);
// Break points in API functions are not supported.
if (%FunctionIsAPIFunction(func)) {
- throw new Error('Cannot set break point in native code.');
+ throw MakeError(kDebugger, 'Cannot set break point in native code.');
}
// Find source position relative to start of the function
var break_position =
@@ -623,7 +654,7 @@ Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
var script = %FunctionGetScript(func);
// Break in builtin JavaScript code is not supported.
if (script.type == Debug.ScriptType.Native) {
- throw new Error('Cannot set break point in native code.');
+ throw MakeError(kDebugger, 'Cannot set break point in native code.');
}
// If the script for the function has a name convert this to a script break
// point.
@@ -700,9 +731,7 @@ Debug.changeBreakPointCondition = function(break_point_number, condition) {
Debug.changeBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
- if (ignoreCount < 0) {
- throw new Error('Invalid argument');
- }
+ if (ignoreCount < 0) throw MakeError(kDebugger, 'Invalid argument');
var break_point = this.findBreakPoint(break_point_number, false);
break_point.setIgnoreCount(ignoreCount);
};
@@ -714,9 +743,7 @@ Debug.clearBreakPoint = function(break_point_number) {
return %ClearBreakPoint(break_point);
} else {
break_point = this.findScriptBreakPoint(break_point_number, true);
- if (!break_point) {
- throw new Error('Invalid breakpoint');
- }
+ if (!break_point) throw MakeError(kDebugger, 'Invalid breakpoint');
}
};
@@ -836,9 +863,7 @@ Debug.changeScriptBreakPointCondition = function(
Debug.changeScriptBreakPointIgnoreCount = function(
break_point_number, ignoreCount) {
- if (ignoreCount < 0) {
- throw new Error('Invalid argument');
- }
+ if (ignoreCount < 0) throw MakeError(kDebugger, 'Invalid argument');
var script_break_point = this.findScriptBreakPoint(break_point_number, false);
script_break_point.setIgnoreCount(ignoreCount);
};
@@ -878,7 +903,7 @@ Debug.isBreakOnUncaughtException = function() {
};
Debug.showBreakPoints = function(f, full, opt_position_alignment) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+ if (!IS_FUNCTION(f)) throw MakeError(kDebuggerType);
var source = full ? this.scriptSource(f) : this.source(f);
var offset = full ? this.sourcePosition(f) : 0;
var locations = this.breakLocations(f, opt_position_alignment);
@@ -925,8 +950,8 @@ function ExecutionState(break_id) {
ExecutionState.prototype.prepareStep = function(opt_action, opt_count,
opt_callframe) {
var action = Debug.StepAction.StepIn;
- if (!IS_UNDEFINED(opt_action)) action = builtins.$toNumber(opt_action);
- var count = opt_count ? builtins.$toNumber(opt_count) : 1;
+ if (!IS_UNDEFINED(opt_action)) action = ToNumber(opt_action);
+ var count = opt_count ? ToNumber(opt_count) : 1;
var callFrameId = 0;
if (!IS_UNDEFINED(opt_callframe)) {
callFrameId = opt_callframe.details_.frameId();
@@ -938,7 +963,7 @@ ExecutionState.prototype.prepareStep = function(opt_action, opt_count,
ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
opt_additional_context) {
return MakeMirror(%DebugEvaluateGlobal(this.break_id, source,
- Boolean(disable_break),
+ ToBoolean(disable_break),
opt_additional_context));
};
@@ -954,14 +979,16 @@ ExecutionState.prototype.frame = function(opt_index) {
// If no index supplied return the selected frame.
if (opt_index == null) opt_index = this.selected_frame;
if (opt_index < 0 || opt_index >= this.frameCount()) {
- throw new Error('Illegal frame index.');
+ throw MakeTypeError(kDebuggerFrame);
}
return new FrameMirror(this.break_id, opt_index);
};
ExecutionState.prototype.setSelectedFrame = function(index) {
- var i = builtins.$toNumber(index);
- if (i < 0 || i >= this.frameCount()) throw new Error('Illegal frame index.');
+ var i = ToNumber(index);
+ if (i < 0 || i >= this.frameCount()) {
+ throw MakeTypeError(kDebuggerFrame);
+ }
this.selected_frame = i;
};
@@ -1048,7 +1075,7 @@ BreakEvent.prototype.toJSONProtocol = function() {
o.body.breakpoints.push(number);
}
}
- return JSON.stringify(ObjectToProtocolObject_(o));
+ return JSONStringify(ObjectToProtocolObject_(o));
};
@@ -1277,7 +1304,7 @@ function ProtocolMessage(request) {
}
this.success = true;
// Handler may set this field to control debugger state.
- this.running = undefined;
+ this.running = UNDEFINED;
}
@@ -1323,7 +1350,7 @@ ProtocolMessage.prototype.toJSONProtocol = function() {
var serializer = MakeMirrorSerializer(true, this.options_);
if (this.body instanceof Mirror) {
bodyJson = serializer.serializeValue(this.body);
- } else if (this.body instanceof Array) {
+ } else if (this.body instanceof GlobalArray) {
bodyJson = [];
for (var i = 0; i < this.body.length; i++) {
if (this.body[i] instanceof Mirror) {
@@ -1345,7 +1372,7 @@ ProtocolMessage.prototype.toJSONProtocol = function() {
json.error_details = this.error_details;
}
json.running = this.running;
- return JSON.stringify(json);
+ return JSONStringify(json);
};
@@ -1361,21 +1388,22 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
try {
try {
// Convert the JSON string to an object.
- request = JSON.parse(json_request);
+ request = JSONParse(json_request);
// Create an initial response.
response = this.createResponse(request);
if (!request.type) {
- throw new Error('Type not specified');
+ throw MakeError(kDebugger, 'Type not specified');
}
if (request.type != 'request') {
- throw new Error("Illegal type '" + request.type + "' in request");
+ throw MakeError(kDebugger,
+ "Illegal type '" + request.type + "' in request");
}
if (!request.command) {
- throw new Error('Command not specified');
+ throw MakeError(kDebugger, 'Command not specified');
}
if (request.arguments) {
@@ -1395,7 +1423,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
if (IS_FUNCTION(handler)) {
%_CallFunction(this, request, response, handler);
} else {
- throw new Error('Unknown command "' + request.command + '" in request');
+ throw MakeError(kDebugger,
+ 'Unknown command "' + request.command + '" in request');
}
} catch (e) {
// If there is no response object created one (without command).
@@ -1403,7 +1432,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
response = this.createResponse();
}
response.success = false;
- response.message = builtins.$toString(e);
+ response.message = ToString(e);
}
// Return the response as a JSON encoded string.
@@ -1420,7 +1449,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
'"request_seq":' + request.seq + ',' +
'"type":"response",' +
'"success":false,' +
- '"message":"Internal error: ' + builtins.$toString(e) + '"}';
+ '"message":"Internal error: ' + ToString(e) + '"}';
}
} catch (e) {
// Failed in one of the catch blocks above - most generic error.
@@ -1441,9 +1470,10 @@ DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
// Get the stepcount argument if any.
if (stepcount) {
- count = builtins.$toNumber(stepcount);
+ count = ToNumber(stepcount);
if (count < 0) {
- throw new Error('Invalid stepcount argument "' + stepcount + '".');
+ throw MakeError(kDebugger,
+ 'Invalid stepcount argument "' + stepcount + '".');
}
}
@@ -1458,7 +1488,8 @@ DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
} else if (stepaction == 'out') {
action = Debug.StepAction.StepOut;
} else {
- throw new Error('Invalid stepaction argument "' + stepaction + '".');
+ throw MakeError(kDebugger,
+ 'Invalid stepaction argument "' + stepaction + '".');
}
}
@@ -1514,7 +1545,7 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
// Find the function through a global evaluate.
f = this.exec_state_.evaluateGlobal(target).value();
} catch (e) {
- response.failed('Error: "' + builtins.$toString(e) +
+ response.failed('Error: "' + ToString(e) +
'" evaluating "' + target + '"');
return;
}
@@ -1527,7 +1558,7 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
break_point_number = Debug.setBreakPoint(f, line, column, condition);
} else if (type == 'handle') {
// Find the object pointed by the specified handle.
- var handle = parseInt(target, 10);
+ var handle = ParseInt(target, 10);
var mirror = LookupMirror(handle);
if (!mirror) {
return response.failed('Object #' + handle + '# not found');
@@ -1581,8 +1612,8 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
response.body.type = 'scriptRegExp';
response.body.script_regexp = break_point.script_regexp_object().source;
} else {
- throw new Error("Internal error: Unexpected breakpoint type: " +
- break_point.type());
+ throw MakeError(kDebugger,
+ "Unexpected breakpoint type: " + break_point.type());
}
response.body.line = break_point.line();
response.body.column = break_point.column();
@@ -1603,7 +1634,7 @@ DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(
}
// Pull out arguments.
- var break_point = builtins.$toNumber(request.arguments.breakpoint);
+ var break_point = ToNumber(request.arguments.breakpoint);
var enabled = request.arguments.enabled;
var condition = request.arguments.condition;
var ignoreCount = request.arguments.ignoreCount;
@@ -1679,7 +1710,7 @@ DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(
}
// Pull out arguments.
- var break_point = builtins.$toNumber(request.arguments.breakpoint);
+ var break_point = ToNumber(request.arguments.breakpoint);
// Check for legal arguments.
if (!break_point) {
@@ -1723,8 +1754,8 @@ DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(
description.type = 'scriptRegExp';
description.script_regexp = break_point.script_regexp_object().source;
} else {
- throw new Error("Internal error: Unexpected breakpoint type: " +
- break_point.type());
+ throw MakeError(kDebugger,
+ "Unexpected breakpoint type: " + break_point.type());
}
array.push(description);
}
@@ -1825,7 +1856,7 @@ DebugCommandProcessor.prototype.backtraceRequest_ = function(
}
// Adjust the index.
- to_index = Math.min(total_frames, to_index);
+ to_index = MathMin(total_frames, to_index);
if (to_index <= from_index) {
var error = 'Invalid frame range';
@@ -1872,7 +1903,7 @@ DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ =
if (scope_description && !IS_UNDEFINED(scope_description.frameNumber)) {
var frame_index = scope_description.frameNumber;
if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
- throw new Error('Invalid frame number');
+ throw MakeTypeError(kDebuggerFrame);
}
return this.exec_state_.frame(frame_index);
} else {
@@ -1888,20 +1919,21 @@ DebugCommandProcessor.prototype.resolveScopeHolder_ =
function(scope_description) {
if (scope_description && "functionHandle" in scope_description) {
if (!IS_NUMBER(scope_description.functionHandle)) {
- throw new Error('Function handle must be a number');
+ throw MakeError(kDebugger, 'Function handle must be a number');
}
var function_mirror = LookupMirror(scope_description.functionHandle);
if (!function_mirror) {
- throw new Error('Failed to find function object by handle');
+ throw MakeError(kDebugger, 'Failed to find function object by handle');
}
if (!function_mirror.isFunction()) {
- throw new Error('Value of non-function type is found by handle');
+ throw MakeError(kDebugger,
+ 'Value of non-function type is found by handle');
}
return function_mirror;
} else {
// No frames no scopes.
if (this.exec_state_.frameCount() == 0) {
- throw new Error('No scopes');
+ throw MakeError(kDebugger, 'No scopes');
}
// Get the frame for which the scopes are requested.
@@ -1936,7 +1968,7 @@ DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
// With no scope argument just return top scope.
var scope_index = 0;
if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
- scope_index = builtins.$toNumber(request.arguments.number);
+ scope_index = ToNumber(request.arguments.number);
if (scope_index < 0 || scope_holder.scopeCount() <= scope_index) {
return response.failed('Invalid scope number');
}
@@ -1954,28 +1986,28 @@ DebugCommandProcessor.resolveValue_ = function(value_description) {
if ("handle" in value_description) {
var value_mirror = LookupMirror(value_description.handle);
if (!value_mirror) {
- throw new Error("Failed to resolve value by handle, ' #" +
- value_description.handle + "# not found");
+ throw MakeError(kDebugger, "Failed to resolve value by handle, ' #" +
+ value_description.handle + "# not found");
}
return value_mirror.value();
} else if ("stringDescription" in value_description) {
- if (value_description.type == BOOLEAN_TYPE) {
- return Boolean(value_description.stringDescription);
- } else if (value_description.type == NUMBER_TYPE) {
- return Number(value_description.stringDescription);
- } if (value_description.type == STRING_TYPE) {
- return String(value_description.stringDescription);
+ if (value_description.type == MirrorType.BOOLEAN_TYPE) {
+ return ToBoolean(value_description.stringDescription);
+ } else if (value_description.type == MirrorType.NUMBER_TYPE) {
+ return ToNumber(value_description.stringDescription);
+ } if (value_description.type == MirrorType.STRING_TYPE) {
+ return ToString(value_description.stringDescription);
} else {
- throw new Error("Unknown type");
+ throw MakeError(kDebugger, "Unknown type");
}
} else if ("value" in value_description) {
return value_description.value;
- } else if (value_description.type == UNDEFINED_TYPE) {
+ } else if (value_description.type == MirrorType.UNDEFINED_TYPE) {
return UNDEFINED;
- } else if (value_description.type == NULL_TYPE) {
+ } else if (value_description.type == MirrorType.NULL_TYPE) {
return null;
} else {
- throw new Error("Failed to parse value description");
+ throw MakeError(kDebugger, "Failed to parse value description");
}
};
@@ -2000,7 +2032,7 @@ DebugCommandProcessor.prototype.setVariableValueRequest_ =
if (IS_UNDEFINED(scope_description.number)) {
response.failed('Missing scope number');
}
- var scope_index = builtins.$toNumber(scope_description.number);
+ var scope_index = ToNumber(scope_description.number);
var scope = scope_holder.scope(scope_index);
@@ -2032,7 +2064,7 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
// The expression argument could be an integer so we convert it to a
// string.
try {
- expression = String(expression);
+ expression = ToString(expression);
} catch(e) {
return response.failed('Failed to convert expression argument to string');
}
@@ -2062,7 +2094,7 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
if (global) {
// Evaluate in the native context.
response.body = this.exec_state_.evaluateGlobal(
- expression, Boolean(disable_break), additional_context_object);
+ expression, ToBoolean(disable_break), additional_context_object);
return;
}
@@ -2078,18 +2110,18 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
// Check whether a frame was specified.
if (!IS_UNDEFINED(frame)) {
- var frame_number = builtins.$toNumber(frame);
+ var frame_number = ToNumber(frame);
if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
return response.failed('Invalid frame "' + frame + '"');
}
// Evaluate in the specified frame.
response.body = this.exec_state_.frame(frame_number).evaluate(
- expression, Boolean(disable_break), additional_context_object);
+ expression, ToBoolean(disable_break), additional_context_object);
return;
} else {
// Evaluate in the selected frame.
response.body = this.exec_state_.frame().evaluate(
- expression, Boolean(disable_break), additional_context_object);
+ expression, ToBoolean(disable_break), additional_context_object);
return;
}
};
@@ -2110,7 +2142,7 @@ DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
// Set 'includeSource' option for script lookup.
if (!IS_UNDEFINED(request.arguments.includeSource)) {
- var includeSource = builtins.$toBoolean(request.arguments.includeSource);
+ var includeSource = ToBoolean(request.arguments.includeSource);
response.setOption('includeSource', includeSource);
}
@@ -2178,7 +2210,7 @@ DebugCommandProcessor.prototype.sourceRequest_ = function(request, response) {
to_line = request.arguments.toLine;
if (!IS_UNDEFINED(request.arguments.frame)) {
- var frame_number = builtins.$toNumber(request.arguments.frame);
+ var frame_number = ToNumber(request.arguments.frame);
if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
return response.failed('Invalid frame "' + frame + '"');
}
@@ -2214,15 +2246,15 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
if (request.arguments) {
// Pull out arguments.
if (!IS_UNDEFINED(request.arguments.types)) {
- types = builtins.$toNumber(request.arguments.types);
- if (isNaN(types) || types < 0) {
+ types = ToNumber(request.arguments.types);
+ if (IsNaN(types) || types < 0) {
return response.failed('Invalid types "' +
request.arguments.types + '"');
}
}
if (!IS_UNDEFINED(request.arguments.includeSource)) {
- includeSource = builtins.$toBoolean(request.arguments.includeSource);
+ includeSource = ToBoolean(request.arguments.includeSource);
response.setOption('includeSource', includeSource);
}
@@ -2237,8 +2269,8 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
var filterStr = null;
var filterNum = null;
if (!IS_UNDEFINED(request.arguments.filter)) {
- var num = builtins.$toNumber(request.arguments.filter);
- if (!isNaN(num)) {
+ var num = ToNumber(request.arguments.filter);
+ if (!IsNaN(num)) {
filterNum = num;
}
filterStr = request.arguments.filter;
@@ -2331,7 +2363,7 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(
return;
}
- var change_log = new Array();
+ var change_log = new GlobalArray();
if (!IS_STRING(request.arguments.new_source)) {
throw "new_source argument expected";
@@ -2373,7 +2405,7 @@ DebugCommandProcessor.prototype.restartFrameRequest_ = function(
var frame_mirror;
// Check whether a frame was specified.
if (!IS_UNDEFINED(frame)) {
- var frame_number = builtins.$toNumber(frame);
+ var frame_number = ToNumber(frame);
if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
return response.failed('Invalid frame "' + frame + '"');
}
@@ -2567,3 +2599,31 @@ function ValueToProtocolValue_(value, mirror_serializer) {
}
return json;
}
+
+
+// -------------------------------------------------------------------
+// Exports
+
+utils.InstallConstants(global, [
+ "Debug", Debug,
+ "DebugCommandProcessor", DebugCommandProcessor,
+]);
+
+// Functions needed by the debugger runtime.
+utils.InstallFunctions(utils, DONT_ENUM, [
+ "MakeExecutionState", MakeExecutionState,
+ "MakeExceptionEvent", MakeExceptionEvent,
+ "MakeBreakEvent", MakeBreakEvent,
+ "MakeCompileEvent", MakeCompileEvent,
+ "MakePromiseEvent", MakePromiseEvent,
+ "MakeAsyncTaskEvent", MakeAsyncTaskEvent,
+ "IsBreakPointTriggered", IsBreakPointTriggered,
+ "UpdateScriptBreakPoints", UpdateScriptBreakPoints,
+]);
+
+// Export to liveedit.js
+utils.Export(function(to) {
+ to.GetScriptBreakPoints = GetScriptBreakPoints;
+});
+
+})
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
new file mode 100644
index 0000000000..fb8d495af8
--- /dev/null
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -0,0 +1,145 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/ia32/frames-ia32.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void EmitDebugBreakSlot(MacroAssembler* masm) {
+ Label check_codesize;
+ __ bind(&check_codesize);
+ __ Nop(Assembler::kDebugBreakSlotLength);
+ DCHECK_EQ(Assembler::kDebugBreakSlotLength,
+ masm->SizeOfCodeGeneratedSince(&check_codesize));
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
+ int call_argc) {
+ // Generate enough nop's to make space for a call instruction.
+ masm->RecordDebugBreakSlot(mode, call_argc);
+ EmitDebugBreakSlot(masm);
+}
+
+
+void DebugCodegen::ClearDebugBreakSlot(Address pc) {
+ CodePatcher patcher(pc, Assembler::kDebugBreakSlotLength);
+ EmitDebugBreakSlot(patcher.masm());
+}
+
+
+void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+ DCHECK_EQ(Code::BUILTIN, code->kind());
+ static const int kSize = Assembler::kDebugBreakSlotLength;
+ CodePatcher patcher(pc, kSize);
+
+ // Add a label for checking the size of the code used for returning.
+ Label check_codesize;
+ patcher.masm()->bind(&check_codesize);
+ patcher.masm()->call(code->entry(), RelocInfo::NONE32);
+ // Check that the size of the code generated is as expected.
+ DCHECK_EQ(kSize, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
+}
+
+
+void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
+ DebugBreakCallHelperMode mode) {
+ __ RecordComment("Debug break");
+
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+ __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue)));
+ }
+ __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
+
+ if (mode == SAVE_RESULT_REGISTER) __ push(eax);
+
+ __ Move(eax, Immediate(0)); // No arguments.
+ __ mov(ebx,
+ Immediate(ExternalReference(
+ Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
+
+ CEntryStub ceb(masm->isolate(), 1);
+ __ CallStub(&ceb);
+
+ if (FLAG_debug_code) {
+ for (int i = 0; i < kNumJSCallerSaved; ++i) {
+ Register reg = {JSCallerSavedCode(i)};
+ __ Move(reg, Immediate(kDebugZapValue));
+ }
+ }
+
+ if (mode == SAVE_RESULT_REGISTER) __ pop(eax);
+
+ __ pop(ebx);
+ // We divide stored value by 2 (untagging) and multiply it by word's size.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
+ __ lea(esp, Operand(esp, ebx, times_half_pointer_size, 0));
+
+ // Get rid of the internal frame.
+ }
+
+ // This call did not replace a call, so there will be an unwanted
+ // return address left on the stack. Here we get rid of that.
+ __ add(esp, Immediate(kPointerSize));
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ jmp(Operand::StaticVariable(after_break_target));
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ masm->ret(0);
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
+
+ // We do not know our frame height, but set esp based on ebp.
+ __ lea(esp, Operand(ebp, -1 * kPointerSize));
+
+ __ pop(edi); // Function.
+ __ pop(ebp);
+
+ // Load context from the function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+
+ // Re-run JSFunction, edi is function, esi is context.
+ __ jmp(edx);
+}
+
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 150dcb8892..8312dd3b6d 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -2,21 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/liveedit.h"
+#include "src/debug/liveedit.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
+#include "src/frames-inl.h"
#include "src/global-handles.h"
#include "src/messages.h"
#include "src/parser.h"
#include "src/scopeinfo.h"
#include "src/scopes.h"
+#include "src/v8.h"
#include "src/v8memory.h"
namespace v8 {
@@ -28,7 +27,8 @@ void SetElementSloppy(Handle<JSObject> object,
// Ignore return value from SetElement. It can only be a failure if there
// are element setters causing exceptions and the debugger context has none
// of these.
- JSObject::SetElement(object, index, value, SLOPPY).Assert();
+ Object::SetElement(object->GetIsolate(), object, index, value, SLOPPY)
+ .Assert();
}
@@ -900,23 +900,6 @@ MaybeHandle<JSArray> LiveEdit::GatherCompileInfo(Handle<Script> script,
}
-void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
- Isolate* isolate = array->GetIsolate();
- HandleScope scope(isolate);
- int len = GetArrayLength(array);
- for (int i = 0; i < len; i++) {
- Handle<SharedFunctionInfo> info(
- SharedFunctionInfo::cast(
- *Object::GetElement(isolate, array, i).ToHandleChecked()));
- SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create(isolate);
- Handle<String> name_handle(String::cast(info->name()));
- info_wrapper.SetProperties(name_handle, info->start_position(),
- info->end_position(), info);
- SetElementSloppy(array, i, info_wrapper.GetJSArray());
- }
-}
-
-
// Visitor that finds all references to a particular code object,
// including "CODE_TARGET" references in other code objects and replaces
// them on the fly.
@@ -1092,37 +1075,6 @@ class LiteralFixer {
};
-namespace {
-
-// Check whether the code is natural function code (not a lazy-compile stub
-// code).
-bool IsJSFunctionCode(Code* code) { return code->kind() == Code::FUNCTION; }
-
-
-// Returns true if an instance of candidate were inlined into function's code.
-bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
- DisallowHeapAllocation no_gc;
-
- if (function->code()->kind() != Code::OPTIMIZED_FUNCTION) return false;
-
- DeoptimizationInputData* const data =
- DeoptimizationInputData::cast(function->code()->deoptimization_data());
- if (data != function->GetIsolate()->heap()->empty_fixed_array()) {
- FixedArray* const literals = data->LiteralArray();
- int const inlined_count = data->InlinedFunctionCount()->value();
- for (int i = 0; i < inlined_count; ++i) {
- if (SharedFunctionInfo::cast(literals->get(i)) == candidate) {
- return true;
- }
- }
- }
-
- return false;
-}
-
-} // namespace
-
-
// Marks code that shares the same shared function info or has inlined
// code that shares the same function info.
class DependentFunctionMarker: public OptimizedFunctionVisitor {
@@ -1138,8 +1090,7 @@ class DependentFunctionMarker: public OptimizedFunctionVisitor {
virtual void VisitFunction(JSFunction* function) {
// It should be guaranteed by the iterator that everything is optimized.
DCHECK(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
- if (shared_info_ == function->shared() ||
- IsInlined(function, shared_info_)) {
+ if (function->Inlines(shared_info_)) {
// Mark the code for deoptimization.
function->code()->set_marked_for_deoptimization(true);
found_ = true;
@@ -1171,7 +1122,7 @@ void LiveEdit::ReplaceFunctionCode(
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
- if (IsJSFunctionCode(shared_info->code())) {
+ if (shared_info->code()->kind() == Code::FUNCTION) {
Handle<Code> code = compile_info_wrapper.GetFunctionCode();
ReplaceCodeObject(Handle<Code>(shared_info->code()), code);
Handle<Object> code_scope_info = compile_info_wrapper.GetCodeScopeInfo();
@@ -1187,13 +1138,6 @@ void LiveEdit::ReplaceFunctionCode(
}
}
- if (shared_info->debug_info()->IsDebugInfo()) {
- Handle<DebugInfo> debug_info(DebugInfo::cast(shared_info->debug_info()));
- Handle<Code> new_original_code =
- isolate->factory()->CopyCode(compile_info_wrapper.GetFunctionCode());
- debug_info->set_original_code(*new_original_code);
- }
-
int start_position = compile_info_wrapper.GetStartPosition();
int end_position = compile_info_wrapper.GetEndPosition();
shared_info->set_start_position(start_position);
@@ -1409,7 +1353,7 @@ void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
info->set_end_position(new_function_end);
info->set_function_token_position(new_function_token_pos);
- if (IsJSFunctionCode(info->code())) {
+ if (info->code()->kind() == Code::FUNCTION) {
// Patch relocation info section of the code.
Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
position_change_array);
@@ -1514,7 +1458,7 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
Handle<SharedFunctionInfo> shared =
UnwrapSharedFunctionInfoFromJSValue(jsvalue);
- if (function->shared() == *shared || IsInlined(*function, *shared)) {
+ if (function->Inlines(*shared)) {
SetElementSloppy(result, i, Handle<Smi>(Smi::FromInt(status), isolate));
return true;
}
diff --git a/deps/v8/src/liveedit.h b/deps/v8/src/debug/liveedit.h
index 495fcce64d..251368f0cb 100644
--- a/deps/v8/src/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -2,9 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LIVEEDIT_H_
-#define V8_LIVEEDIT_H_
-
+#ifndef V8_DEBUG_LIVEEDIT_H_
+#define V8_DEBUG_LIVEEDIT_H_
// Live Edit feature implementation.
@@ -82,8 +81,6 @@ class LiveEdit : AllStatic {
Handle<Script> script,
Handle<String> source);
- static void WrapSharedFunctionInfos(Handle<JSArray> array);
-
static void ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array);
@@ -126,7 +123,7 @@ class LiveEdit : AllStatic {
// Return error message or NULL.
static const char* RestartFrame(JavaScriptFrame* frame);
- // A copy of this is in liveedit-debugger.js.
+ // A copy of this is in liveedit.js.
enum FunctionPatchabilityStatus {
FUNCTION_AVAILABLE_FOR_PATCH = 1,
FUNCTION_BLOCKED_ON_ACTIVE_STACK = 2,
@@ -249,7 +246,8 @@ class JSArrayBasedStruct {
protected:
void SetField(int field_position, Handle<Object> value) {
- JSObject::SetElement(array_, field_position, value, SLOPPY).Assert();
+ Object::SetElement(isolate(), array_, field_position, value, SLOPPY)
+ .Assert();
}
void SetSmiValueField(int field_position, int value) {
@@ -368,4 +366,4 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
} } // namespace v8::internal
-#endif /* V*_LIVEEDIT_H_ */
+#endif /* V8_DEBUG_LIVEEDIT_H_ */
diff --git a/deps/v8/src/liveedit-debugger.js b/deps/v8/src/debug/liveedit.js
index eaa23834c9..27425c154d 100644
--- a/deps/v8/src/liveedit-debugger.js
+++ b/deps/v8/src/debug/liveedit.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// LiveEdit feature implementation. The script should be executed after
-// debug-debugger.js.
+// debug.js.
// A LiveEdit namespace. It contains functions that modifies JavaScript code
// according to changes of script source (if possible).
@@ -20,15 +20,27 @@
//
// LiveEdit namespace is declared inside a single function constructor.
-"use strict";
+(function(global, utils) {
+ "use strict";
-Debug.LiveEdit = new function() {
+ // -------------------------------------------------------------------
+ // Imports
+
+ var FindScriptSourcePosition = global.Debug.findScriptSourcePosition;
+ var GetScriptBreakPoints;
+ var GlobalArray = global.Array;
+ var MathFloor = global.Math.floor;
+ var SyntaxError = global.SyntaxError;
+
+ utils.Import(function(from) {
+ GetScriptBreakPoints = from.GetScriptBreakPoints;
+ });
+
+ // -------------------------------------------------------------------
// Forward declaration for minifier.
var FunctionStatus;
- var NEEDS_STEP_IN_PROPERTY_NAME = "stack_update_needs_step_in";
-
// Applies the change to the script.
// The change is in form of list of chunks encoded in a single array as
// a series of triplets (pos1_start, pos1_end, pos2_end)
@@ -74,10 +86,10 @@ Debug.LiveEdit = new function() {
FindCorrespondingFunctions(root_old_node, root_new_node);
// Prepare to-do lists.
- var replace_code_list = new Array();
- var link_to_old_script_list = new Array();
- var link_to_original_script_list = new Array();
- var update_positions_list = new Array();
+ var replace_code_list = new GlobalArray();
+ var link_to_old_script_list = new GlobalArray();
+ var link_to_original_script_list = new GlobalArray();
+ var update_positions_list = new GlobalArray();
function HarvestTodo(old_node) {
function CollectDamaged(node) {
@@ -130,7 +142,7 @@ Debug.LiveEdit = new function() {
HarvestTodo(root_old_node);
// Collect shared infos for functions whose code need to be patched.
- var replaced_function_infos = new Array();
+ var replaced_function_infos = new GlobalArray();
for (var i = 0; i < replace_code_list.length; i++) {
var live_shared_function_infos =
replace_code_list[i].live_shared_function_infos;
@@ -149,12 +161,9 @@ Debug.LiveEdit = new function() {
var dropped_functions_number =
CheckStackActivations(replaced_function_infos, change_log);
- preview_description.stack_modified = dropped_functions_number != 0;
-
// Our current implementation requires client to manually issue "step in"
- // command for correct stack state.
- preview_description[NEEDS_STEP_IN_PROPERTY_NAME] =
- preview_description.stack_modified;
+ // command for correct stack state if the stack was modified.
+ preview_description.stack_modified = dropped_functions_number != 0;
// Start with breakpoints. Convert their line/column positions and
// temporary remove.
@@ -175,7 +184,7 @@ Debug.LiveEdit = new function() {
old_script = %LiveEditReplaceScript(script, new_source,
old_script_name);
- var link_to_old_script_report = new Array();
+ var link_to_old_script_report = new GlobalArray();
change_log.push( { linked_to_old_script: link_to_old_script_report } );
// We need to link to old script all former nested functions.
@@ -197,7 +206,7 @@ Debug.LiveEdit = new function() {
PatchFunctionCode(replace_code_list[i], change_log);
}
- var position_patch_report = new Array();
+ var position_patch_report = new GlobalArray();
change_log.push( {position_patched: position_patch_report} );
for (var i = 0; i < update_positions_list.length; i++) {
@@ -219,9 +228,6 @@ Debug.LiveEdit = new function() {
preview_description.updated = true;
return preview_description;
}
- // Function is public.
- this.ApplyPatchMultiChunk = ApplyPatchMultiChunk;
-
// Fully compiles source string as a script. Returns Array of
// FunctionCompileInfo -- a descriptions of all functions of the script.
@@ -238,8 +244,8 @@ Debug.LiveEdit = new function() {
var raw_compile_info = %LiveEditGatherCompileInfo(script, source);
// Sort function infos by start position field.
- var compile_info = new Array();
- var old_index_map = new Array();
+ var compile_info = new GlobalArray();
+ var old_index_map = new GlobalArray();
for (var i = 0; i < raw_compile_info.length; i++) {
var info = new FunctionCompileInfo(raw_compile_info[i]);
// Remove all links to the actual script. Breakpoints system and
@@ -372,7 +378,7 @@ Debug.LiveEdit = new function() {
break_point.clear();
// TODO(LiveEdit): be careful with resource offset here.
- var break_point_position = Debug.findScriptSourcePosition(original_script,
+ var break_point_position = FindScriptSourcePosition(original_script,
break_point.line(), break_point.column());
var old_position_description = {
@@ -448,7 +454,7 @@ Debug.LiveEdit = new function() {
}
function PosTranslator(diff_array) {
- var chunks = new Array();
+ var chunks = new GlobalArray();
var current_diff = 0;
for (var i = 0; i < diff_array.length; i += 3) {
var pos1_begin = diff_array[i];
@@ -474,7 +480,7 @@ Debug.LiveEdit = new function() {
var chunk_index2 = array.length - 1;
while (chunk_index1 < chunk_index2) {
- var middle_index = Math.floor((chunk_index1 + chunk_index2) / 2);
+ var middle_index = MathFloor((chunk_index1 + chunk_index2) / 2);
if (pos < array[middle_index + 1].pos1) {
chunk_index2 = middle_index;
} else {
@@ -555,7 +561,7 @@ Debug.LiveEdit = new function() {
function BuildNode() {
var my_index = index;
index++;
- var child_array = new Array();
+ var child_array = new GlobalArray();
while (index < code_info_array.length &&
code_info_array[index].outer_index == my_index) {
child_array.push(BuildNode());
@@ -687,7 +693,7 @@ Debug.LiveEdit = new function() {
var scope_change_description =
IsFunctionContextLocalsChanged(old_node.info, new_node.info);
if (scope_change_description) {
- old_node.status = FunctionStatus.CHANGED;
+ old_node.status = FunctionStatus.CHANGED;
}
var old_children = old_node.children;
@@ -788,7 +794,7 @@ Debug.LiveEdit = new function() {
function FindLiveSharedInfos(old_code_tree, script) {
var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script);
- var shared_infos = new Array();
+ var shared_infos = new GlobalArray();
for (var i = 0; i < shared_raw_list.length; i++) {
shared_infos.push(new SharedInfoWrapper(shared_raw_list[i]));
@@ -905,7 +911,7 @@ Debug.LiveEdit = new function() {
// have activations on stack (of any thread). Throws a Failure exception
// if this proves to be false.
function CheckStackActivations(shared_wrapper_list, change_log) {
- var shared_list = new Array();
+ var shared_list = new GlobalArray();
for (var i = 0; i < shared_wrapper_list.length; i++) {
shared_list[i] = shared_wrapper_list[i].info;
}
@@ -915,8 +921,8 @@ Debug.LiveEdit = new function() {
throw new Failure(result[shared_list.length]);
}
- var problems = new Array();
- var dropped = new Array();
+ var problems = new GlobalArray();
+ var dropped = new GlobalArray();
for (var i = 0; i < shared_list.length; i++) {
var shared = shared_wrapper_list[i];
if (result[i] == FunctionPatchabilityStatus.REPLACED_ON_ACTIVE_STACK) {
@@ -969,8 +975,6 @@ Debug.LiveEdit = new function() {
function Failure(message) {
this.message = message;
}
- // Function (constructor) is public.
- this.Failure = Failure;
Failure.prototype.toString = function() {
return "LiveEdit Failure: " + this.message;
@@ -1005,8 +1009,6 @@ Debug.LiveEdit = new function() {
function GetPcFromSourcePos(func, source_pos) {
return %GetFunctionCodePositionFromSource(func, source_pos);
}
- // Function is public.
- this.GetPcFromSourcePos = GetPcFromSourcePos;
// LiveEdit main entry point: changes a script text to a new string.
function SetScriptSource(script, new_source, preview_only, change_log) {
@@ -1015,8 +1017,6 @@ Debug.LiveEdit = new function() {
return ApplyPatchMultiChunk(script, diff, new_source, preview_only,
change_log);
}
- // Function is public.
- this.SetScriptSource = SetScriptSource;
function CompareStrings(s1, s2) {
return %LiveEditCompareStrings(s1, s2);
@@ -1103,23 +1103,21 @@ Debug.LiveEdit = new function() {
return ProcessOldNode(old_code_tree);
}
- // Restarts call frame and returns value similar to what LiveEdit returns.
- function RestartFrame(frame_mirror) {
- var result = frame_mirror.restart();
- if (IS_STRING(result)) {
- throw new Failure("Failed to restart frame: " + result);
- }
- var result = {};
- result[NEEDS_STEP_IN_PROPERTY_NAME] = true;
- return result;
- }
- // Function is public.
- this.RestartFrame = RestartFrame;
+ // -------------------------------------------------------------------
+ // Exports
- // Functions are public for tests.
- this.TestApi = {
+ var LiveEdit = {};
+ LiveEdit.SetScriptSource = SetScriptSource;
+ LiveEdit.ApplyPatchMultiChunk = ApplyPatchMultiChunk;
+ LiveEdit.Failure = Failure;
+ LiveEdit.GetPcFromSourcePos = GetPcFromSourcePos;
+
+ LiveEdit.TestApi = {
PosTranslator: PosTranslator,
CompareStrings: CompareStrings,
ApplySingleChunkPatch: ApplySingleChunkPatch
};
-};
+
+ global.Debug.LiveEdit = LiveEdit;
+
+})
diff --git a/deps/v8/src/debug/mips/OWNERS b/deps/v8/src/debug/mips/OWNERS
new file mode 100644
index 0000000000..5508ba626f
--- /dev/null
+++ b/deps/v8/src/debug/mips/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/src/debug/mips/debug-mips.cc b/deps/v8/src/debug/mips/debug-mips.cc
new file mode 100644
index 0000000000..30bdcac1b6
--- /dev/null
+++ b/deps/v8/src/debug/mips/debug-mips.cc
@@ -0,0 +1,148 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void EmitDebugBreakSlot(MacroAssembler* masm) {
+ Label check_size;
+ __ bind(&check_size);
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+ }
+ DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
+ masm->InstructionsGeneratedSince(&check_size));
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
+ int call_argc) {
+ // Generate enough nop's to make space for a call instruction. Avoid emitting
+ // the trampoline pool in the debug break slot code.
+ Assembler::BlockTrampolinePoolScope block_pool(masm);
+ masm->RecordDebugBreakSlot(mode, call_argc);
+ EmitDebugBreakSlot(masm);
+}
+
+
+void DebugCodegen::ClearDebugBreakSlot(Address pc) {
+ CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ EmitDebugBreakSlot(patcher.masm());
+}
+
+
+void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+ DCHECK_EQ(Code::BUILTIN, code->kind());
+ CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ // Patch the code changing the debug break slot code from:
+ // nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // to a call to the debug break slot code.
+ // li t9, address (lui t9 / ori t9 instruction pair)
+ // call t9 (jalr t9 / nop instruction pair)
+ patcher.masm()->li(v8::internal::t9,
+ Operand(reinterpret_cast<int32_t>(code->entry())));
+ patcher.masm()->Call(v8::internal::t9);
+}
+
+
+void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
+ DebugBreakCallHelperMode mode) {
+ __ RecordComment("Debug break");
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
+ __ Subu(sp, sp,
+ Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize));
+ for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) {
+ __ sw(at, MemOperand(sp, kPointerSize * i));
+ }
+ __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
+ __ push(at);
+
+ if (mode == SAVE_RESULT_REGISTER) __ push(v0);
+
+ __ PrepareCEntryArgs(0); // No arguments.
+ __ PrepareCEntryFunction(ExternalReference(
+ Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate()));
+
+ CEntryStub ceb(masm->isolate(), 1);
+ __ CallStub(&ceb);
+
+ if (FLAG_debug_code) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ Register reg = {JSCallerSavedCode(i)};
+ __ li(reg, kDebugZapValue);
+ }
+ }
+
+ if (mode == SAVE_RESULT_REGISTER) __ pop(v0);
+
+ // Don't bother removing padding bytes pushed on the stack
+ // as the frame is going to be restored right away.
+
+ // Leave the internal frame.
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ li(t9, Operand(after_break_target));
+ __ lw(t9, MemOperand(t9));
+ __ Jump(t9);
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ __ Ret();
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ __ li(at, Operand(restarter_frame_function_slot));
+ __ sw(zero_reg, MemOperand(at, 0));
+
+ // We do not know our frame height, but set sp based on fp.
+ __ Subu(sp, fp, Operand(kPointerSize));
+
+ __ Pop(ra, fp, a1); // Return address, Frame, Function.
+
+ // Load context from the function.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ lw(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
+ __ Addu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Re-run JSFunction, a1 is function, cp is context.
+ __ Jump(t9);
+}
+
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/debug/mips64/OWNERS b/deps/v8/src/debug/mips64/OWNERS
new file mode 100644
index 0000000000..5508ba626f
--- /dev/null
+++ b/deps/v8/src/debug/mips64/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
new file mode 100644
index 0000000000..9b4d355d79
--- /dev/null
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -0,0 +1,150 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void EmitDebugBreakSlot(MacroAssembler* masm) {
+ Label check_size;
+ __ bind(&check_size);
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+ }
+ DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
+ masm->InstructionsGeneratedSince(&check_size));
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
+ int call_argc) {
+ // Generate enough nop's to make space for a call instruction. Avoid emitting
+ // the trampoline pool in the debug break slot code.
+ Assembler::BlockTrampolinePoolScope block_pool(masm);
+ masm->RecordDebugBreakSlot(mode, call_argc);
+ EmitDebugBreakSlot(masm);
+}
+
+
+void DebugCodegen::ClearDebugBreakSlot(Address pc) {
+ CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ EmitDebugBreakSlot(patcher.masm());
+}
+
+
+void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+ DCHECK_EQ(Code::BUILTIN, code->kind());
+ CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ // Patch the code changing the debug break slot code from:
+ // nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // to a call to the debug break slot code.
+ // li t9, address (4-instruction sequence on mips64)
+ // call t9 (jalr t9 / nop instruction pair)
+ patcher.masm()->li(v8::internal::t9,
+ Operand(reinterpret_cast<int64_t>(code->entry())),
+ ADDRESS_LOAD);
+ patcher.masm()->Call(v8::internal::t9);
+}
+
+
+void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
+ DebugBreakCallHelperMode mode) {
+ __ RecordComment("Debug break");
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
+ __ Dsubu(sp, sp,
+ Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize));
+ for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) {
+ __ sd(at, MemOperand(sp, kPointerSize * i));
+ }
+ __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
+ __ push(at);
+
+ if (mode == SAVE_RESULT_REGISTER) __ push(v0);
+
+ __ PrepareCEntryArgs(0); // No arguments.
+ __ PrepareCEntryFunction(ExternalReference(
+ Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate()));
+
+ CEntryStub ceb(masm->isolate(), 1);
+ __ CallStub(&ceb);
+
+ if (FLAG_debug_code) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ Register reg = {JSCallerSavedCode(i)};
+ __ li(reg, kDebugZapValue);
+ }
+ }
+
+ if (mode == SAVE_RESULT_REGISTER) __ pop(v0);
+
+ // Don't bother removing padding bytes pushed on the stack
+ // as the frame is going to be restored right away.
+
+ // Leave the internal frame.
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ li(t9, Operand(after_break_target));
+ __ ld(t9, MemOperand(t9));
+ __ Jump(t9);
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ __ Ret();
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ __ li(at, Operand(restarter_frame_function_slot));
+ __ sw(zero_reg, MemOperand(at, 0));
+
+ // We do not know our frame height, but set sp based on fp.
+ __ Dsubu(sp, fp, Operand(kPointerSize));
+
+ __ Pop(ra, fp, a1); // Return address, Frame, Function.
+
+ // Load context from the function.
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ ld(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
+ __ Daddu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Re-run JSFunction, a1 is function, cp is context.
+ __ Jump(t9);
+}
+
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/debug/mirrors.js
index d37776af38..f47a2d058e 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -1,8 +1,82 @@
// Copyright 2006-2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+
+(function(global, utils) {
"use strict";
+// ----------------------------------------------------------------------------
+// Imports
+
+var FunctionSourceString;
+var GlobalArray = global.Array;
+var IsNaN = global.isNaN;
+var JSONStringify = global.JSON.stringify;
+var MathMin = global.Math.min;
+var ToBoolean;
+var ToString;
+
+utils.Import(function(from) {
+ FunctionSourceString = from.FunctionSourceString;
+ ToBoolean = from.ToBoolean;
+ ToString = from.ToString;
+});
+
+// ----------------------------------------------------------------------------
+
+// Mirror hierarchy:
+// - Mirror
+// - ValueMirror
+// - UndefinedMirror
+// - NullMirror
+// - BooleanMirror
+// - NumberMirror
+// - StringMirror
+// - SymbolMirror
+// - ObjectMirror
+// - FunctionMirror
+// - UnresolvedFunctionMirror
+// - ArrayMirror
+// - DateMirror
+// - RegExpMirror
+// - ErrorMirror
+// - PromiseMirror
+// - MapMirror
+// - SetMirror
+// - IteratorMirror
+// - GeneratorMirror
+// - PropertyMirror
+// - InternalPropertyMirror
+// - FrameMirror
+// - ScriptMirror
+// - ScopeMirror
+
+// Type names of the different mirrors.
+var MirrorType = {
+ UNDEFINED_TYPE : 'undefined',
+ NULL_TYPE : 'null',
+ BOOLEAN_TYPE : 'boolean',
+ NUMBER_TYPE : 'number',
+ STRING_TYPE : 'string',
+ SYMBOL_TYPE : 'symbol',
+ OBJECT_TYPE : 'object',
+ FUNCTION_TYPE : 'function',
+ REGEXP_TYPE : 'regexp',
+ ERROR_TYPE : 'error',
+ PROPERTY_TYPE : 'property',
+ INTERNAL_PROPERTY_TYPE : 'internalProperty',
+ FRAME_TYPE : 'frame',
+ SCRIPT_TYPE : 'script',
+ CONTEXT_TYPE : 'context',
+ SCOPE_TYPE : 'scope',
+ PROMISE_TYPE : 'promise',
+ MAP_TYPE : 'map',
+ SET_TYPE : 'set',
+ ITERATOR_TYPE : 'iterator',
+ GENERATOR_TYPE : 'generator',
+}
+
+
// Handle id counters.
var next_handle_ = 0;
var next_transient_handle_ = -1;
@@ -12,16 +86,33 @@ var mirror_cache_ = [];
var mirror_cache_enabled_ = true;
+function MirrorCacheIsEmpty() {
+ return next_handle_ == 0 && mirror_cache_.length == 0;
+}
+
+
function ToggleMirrorCache(value) {
mirror_cache_enabled_ = value;
+ ClearMirrorCache();
+}
+
+
+function ClearMirrorCache(value) {
next_handle_ = 0;
mirror_cache_ = [];
}
+// Wrapper to check whether an object is a Promise. The call may not work
+// if promises are not enabled.
+// TODO(yangguo): remove try-catch once promises are enabled by default.
function ObjectIsPromise(value) {
- return IS_SPEC_OBJECT(value) &&
- !IS_UNDEFINED(%DebugGetProperty(value, builtins.$promiseStatus));
+ try {
+ return IS_SPEC_OBJECT(value) &&
+ !IS_UNDEFINED(%DebugGetProperty(value, builtins.$promiseStatus));
+ } catch (e) {
+ return false;
+ }
}
@@ -44,8 +135,8 @@ function MakeMirror(value, opt_transient) {
return mirror;
}
// Special check for NaN as NaN == NaN is false.
- if (mirror.isNumber() && isNaN(mirror.value()) &&
- typeof value == 'number' && isNaN(value)) {
+ if (mirror.isNumber() && IsNaN(mirror.value()) &&
+ typeof value == 'number' && IsNaN(value)) {
return mirror;
}
}
@@ -86,7 +177,7 @@ function MakeMirror(value, opt_transient) {
} else if (IS_GENERATOR(value)) {
mirror = new GeneratorMirror(value);
} else {
- mirror = new ObjectMirror(value, OBJECT_TYPE, opt_transient);
+ mirror = new ObjectMirror(value, MirrorType.OBJECT_TYPE, opt_transient);
}
if (mirror_cache_enabled_) mirror_cache_[mirror.handle()] = mirror;
@@ -102,7 +193,9 @@ function MakeMirror(value, opt_transient) {
* undefined if no mirror with the requested handle was found
*/
function LookupMirror(handle) {
- if (!mirror_cache_enabled_) throw new Error("Mirror cache is disabled");
+ if (!mirror_cache_enabled_) {
+ throw MakeError(kDebugger, "Mirror cache is disabled");
+ }
return mirror_cache_[handle];
}
@@ -138,30 +231,6 @@ function inherits(ctor, superCtor) {
ctor.prototype.constructor = ctor;
}
-
-// Type names of the different mirrors.
-var UNDEFINED_TYPE = 'undefined';
-var NULL_TYPE = 'null';
-var BOOLEAN_TYPE = 'boolean';
-var NUMBER_TYPE = 'number';
-var STRING_TYPE = 'string';
-var SYMBOL_TYPE = 'symbol';
-var OBJECT_TYPE = 'object';
-var FUNCTION_TYPE = 'function';
-var REGEXP_TYPE = 'regexp';
-var ERROR_TYPE = 'error';
-var PROPERTY_TYPE = 'property';
-var INTERNAL_PROPERTY_TYPE = 'internalProperty';
-var FRAME_TYPE = 'frame';
-var SCRIPT_TYPE = 'script';
-var CONTEXT_TYPE = 'context';
-var SCOPE_TYPE = 'scope';
-var PROMISE_TYPE = 'promise';
-var MAP_TYPE = 'map';
-var SET_TYPE = 'set';
-var ITERATOR_TYPE = 'iterator';
-var GENERATOR_TYPE = 'generator';
-
// Maximum length when sending strings through the JSON protocol.
var kMaxProtocolStringLength = 80;
@@ -197,33 +266,6 @@ var ScopeType = { Global: 0,
Block: 5,
Script: 6 };
-
-// Mirror hierarchy:
-// - Mirror
-// - ValueMirror
-// - UndefinedMirror
-// - NullMirror
-// - NumberMirror
-// - StringMirror
-// - SymbolMirror
-// - ObjectMirror
-// - FunctionMirror
-// - UnresolvedFunctionMirror
-// - ArrayMirror
-// - DateMirror
-// - RegExpMirror
-// - ErrorMirror
-// - PromiseMirror
-// - MapMirror
-// - SetMirror
-// - IteratorMirror
-// - GeneratorMirror
-// - PropertyMirror
-// - InternalPropertyMirror
-// - FrameMirror
-// - ScriptMirror
-
-
/**
* Base class for all mirror objects.
* @param {string} type The type of the mirror
@@ -543,7 +585,7 @@ ValueMirror.prototype.value = function() {
* @extends ValueMirror
*/
function UndefinedMirror() {
- %_CallFunction(this, UNDEFINED_TYPE, UNDEFINED, ValueMirror);
+ %_CallFunction(this, MirrorType.UNDEFINED_TYPE, UNDEFINED, ValueMirror);
}
inherits(UndefinedMirror, ValueMirror);
@@ -559,7 +601,7 @@ UndefinedMirror.prototype.toText = function() {
* @extends ValueMirror
*/
function NullMirror() {
- %_CallFunction(this, NULL_TYPE, null, ValueMirror);
+ %_CallFunction(this, MirrorType.NULL_TYPE, null, ValueMirror);
}
inherits(NullMirror, ValueMirror);
@@ -576,7 +618,7 @@ NullMirror.prototype.toText = function() {
* @extends ValueMirror
*/
function BooleanMirror(value) {
- %_CallFunction(this, BOOLEAN_TYPE, value, ValueMirror);
+ %_CallFunction(this, MirrorType.BOOLEAN_TYPE, value, ValueMirror);
}
inherits(BooleanMirror, ValueMirror);
@@ -593,7 +635,7 @@ BooleanMirror.prototype.toText = function() {
* @extends ValueMirror
*/
function NumberMirror(value) {
- %_CallFunction(this, NUMBER_TYPE, value, ValueMirror);
+ %_CallFunction(this, MirrorType.NUMBER_TYPE, value, ValueMirror);
}
inherits(NumberMirror, ValueMirror);
@@ -610,7 +652,7 @@ NumberMirror.prototype.toText = function() {
* @extends ValueMirror
*/
function StringMirror(value) {
- %_CallFunction(this, STRING_TYPE, value, ValueMirror);
+ %_CallFunction(this, MirrorType.STRING_TYPE, value, ValueMirror);
}
inherits(StringMirror, ValueMirror);
@@ -639,7 +681,7 @@ StringMirror.prototype.toText = function() {
* @extends Mirror
*/
function SymbolMirror(value) {
- %_CallFunction(this, SYMBOL_TYPE, value, ValueMirror);
+ %_CallFunction(this, MirrorType.SYMBOL_TYPE, value, ValueMirror);
}
inherits(SymbolMirror, ValueMirror);
@@ -663,7 +705,8 @@ SymbolMirror.prototype.toText = function() {
* @extends ValueMirror
*/
function ObjectMirror(value, type, transient) {
- %_CallFunction(this, type || OBJECT_TYPE, value, transient, ValueMirror);
+ type = type || MirrorType.OBJECT_TYPE;
+ %_CallFunction(this, type, value, transient, ValueMirror);
}
inherits(ObjectMirror, ValueMirror);
@@ -763,9 +806,9 @@ ObjectMirror.prototype.propertyNames = function(kind, limit) {
}
}
}
- limit = Math.min(limit || total, total);
+ limit = MathMin(limit || total, total);
- var names = new Array(limit);
+ var names = new GlobalArray(limit);
var index = 0;
// Copy names for named properties.
@@ -796,7 +839,7 @@ ObjectMirror.prototype.propertyNames = function(kind, limit) {
*/
ObjectMirror.prototype.properties = function(kind, limit) {
var names = this.propertyNames(kind, limit);
- var properties = new Array(names.length);
+ var properties = new GlobalArray(names.length);
for (var i = 0; i < names.length; i++) {
properties[i] = this.property(names[i]);
}
@@ -913,7 +956,7 @@ ObjectMirror.GetInternalProperties = function(value) {
* @extends ObjectMirror
*/
function FunctionMirror(value) {
- %_CallFunction(this, value, FUNCTION_TYPE, ObjectMirror);
+ %_CallFunction(this, value, MirrorType.FUNCTION_TYPE, ObjectMirror);
this.resolved_ = true;
}
inherits(FunctionMirror, ObjectMirror);
@@ -956,7 +999,7 @@ FunctionMirror.prototype.source = function() {
// Return source if function is resolved. Otherwise just fall through to
// return undefined.
if (this.resolved()) {
- return builtins.$functionSourceString(this.value_);
+ return FunctionSourceString(this.value_);
}
};
@@ -1067,7 +1110,7 @@ FunctionMirror.prototype.toText = function() {
function UnresolvedFunctionMirror(value) {
// Construct this using the ValueMirror as an unresolved function is not a
// real object but just a string.
- %_CallFunction(this, FUNCTION_TYPE, value, ValueMirror);
+ %_CallFunction(this, MirrorType.FUNCTION_TYPE, value, ValueMirror);
this.propertyCount_ = 0;
this.elementCount_ = 0;
this.resolved_ = false;
@@ -1101,7 +1144,7 @@ UnresolvedFunctionMirror.prototype.name = function() {
UnresolvedFunctionMirror.prototype.inferredName = function() {
- return undefined;
+ return UNDEFINED;
};
@@ -1131,10 +1174,10 @@ ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index,
opt_to_index) {
var from_index = opt_from_index || 0;
var to_index = opt_to_index || this.length() - 1;
- if (from_index > to_index) return new Array();
- var values = new Array(to_index - from_index + 1);
+ if (from_index > to_index) return new GlobalArray();
+ var values = new GlobalArray(to_index - from_index + 1);
for (var i = from_index; i <= to_index; i++) {
- var details = %DebugGetPropertyDetails(this.value_, builtins.$toString(i));
+ var details = %DebugGetPropertyDetails(this.value_, ToString(i));
var value;
if (details) {
value = new PropertyMirror(this, i, details);
@@ -1160,7 +1203,7 @@ inherits(DateMirror, ObjectMirror);
DateMirror.prototype.toText = function() {
- var s = JSON.stringify(this.value_);
+ var s = JSONStringify(this.value_);
return s.substring(1, s.length - 1); // cut quotes
};
@@ -1172,7 +1215,7 @@ DateMirror.prototype.toText = function() {
* @extends ObjectMirror
*/
function RegExpMirror(value) {
- %_CallFunction(this, value, REGEXP_TYPE, ObjectMirror);
+ %_CallFunction(this, value, MirrorType.REGEXP_TYPE, ObjectMirror);
}
inherits(RegExpMirror, ObjectMirror);
@@ -1244,7 +1287,7 @@ RegExpMirror.prototype.toText = function() {
* @extends ObjectMirror
*/
function ErrorMirror(value) {
- %_CallFunction(this, value, ERROR_TYPE, ObjectMirror);
+ %_CallFunction(this, value, MirrorType.ERROR_TYPE, ObjectMirror);
}
inherits(ErrorMirror, ObjectMirror);
@@ -1277,7 +1320,7 @@ ErrorMirror.prototype.toText = function() {
* @extends ObjectMirror
*/
function PromiseMirror(value) {
- %_CallFunction(this, value, PROMISE_TYPE, ObjectMirror);
+ %_CallFunction(this, value, MirrorType.PROMISE_TYPE, ObjectMirror);
}
inherits(PromiseMirror, ObjectMirror);
@@ -1306,7 +1349,7 @@ PromiseMirror.prototype.promiseValue = function() {
function MapMirror(value) {
- %_CallFunction(this, value, MAP_TYPE, ObjectMirror);
+ %_CallFunction(this, value, MirrorType.MAP_TYPE, ObjectMirror);
}
inherits(MapMirror, ObjectMirror);
@@ -1346,7 +1389,7 @@ MapMirror.prototype.entries = function(opt_limit) {
function SetMirror(value) {
- %_CallFunction(this, value, SET_TYPE, ObjectMirror);
+ %_CallFunction(this, value, MirrorType.SET_TYPE, ObjectMirror);
}
inherits(SetMirror, ObjectMirror);
@@ -1380,7 +1423,7 @@ SetMirror.prototype.values = function(opt_limit) {
function IteratorMirror(value) {
- %_CallFunction(this, value, ITERATOR_TYPE, ObjectMirror);
+ %_CallFunction(this, value, MirrorType.ITERATOR_TYPE, ObjectMirror);
}
inherits(IteratorMirror, ObjectMirror);
@@ -1412,7 +1455,7 @@ IteratorMirror.prototype.preview = function(opt_limit) {
* @extends Mirror
*/
function GeneratorMirror(value) {
- %_CallFunction(this, value, GENERATOR_TYPE, ObjectMirror);
+ %_CallFunction(this, value, MirrorType.GENERATOR_TYPE, ObjectMirror);
}
inherits(GeneratorMirror, ObjectMirror);
@@ -1479,7 +1522,7 @@ GeneratorMirror.prototype.receiver = function() {
* @extends Mirror
*/
function PropertyMirror(mirror, name, details) {
- %_CallFunction(this, PROPERTY_TYPE, Mirror);
+ %_CallFunction(this, MirrorType.PROPERTY_TYPE, Mirror);
this.mirror_ = mirror;
this.name_ = name;
this.value_ = details[0];
@@ -1622,7 +1665,7 @@ PropertyMirror.prototype.isNative = function() {
* @extends Mirror
*/
function InternalPropertyMirror(name, value) {
- %_CallFunction(this, INTERNAL_PROPERTY_TYPE, Mirror);
+ %_CallFunction(this, MirrorType.INTERNAL_PROPERTY_TYPE, Mirror);
this.name_ = name;
this.value_ = value;
}
@@ -1835,7 +1878,7 @@ FrameDetails.prototype.stepInPositionsImpl = function() {
* @extends Mirror
*/
function FrameMirror(break_id, index) {
- %_CallFunction(this, FRAME_TYPE, Mirror);
+ %_CallFunction(this, MirrorType.FRAME_TYPE, Mirror);
this.break_id_ = break_id;
this.index_ = index;
this.details_ = new FrameDetails(break_id, index);
@@ -2034,7 +2077,7 @@ FrameMirror.prototype.evaluate = function(source, disable_break,
this.details_.frameId(),
this.details_.inlinedFrameIndex(),
source,
- Boolean(disable_break),
+ ToBoolean(disable_break),
opt_context_object));
};
@@ -2209,7 +2252,7 @@ function ScopeDetails(frame, fun, index, opt_details) {
} else {
this.details_ = opt_details || %GetFunctionScopeDetails(fun.value(), index);
this.fun_value_ = fun.value();
- this.break_id_ = undefined;
+ this.break_id_ = UNDEFINED;
}
this.index_ = index;
}
@@ -2241,9 +2284,7 @@ ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
raw_res = %SetScopeVariableValue(this.fun_value_, null, null, this.index_,
name, new_value);
}
- if (!raw_res) {
- throw new Error("Failed to set variable value");
- }
+ if (!raw_res) throw MakeError(kDebugger, "Failed to set variable value");
};
@@ -2257,15 +2298,15 @@ ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
* @constructor
* @extends Mirror
*/
-function ScopeMirror(frame, function, index, opt_details) {
- %_CallFunction(this, SCOPE_TYPE, Mirror);
+function ScopeMirror(frame, fun, index, opt_details) {
+ %_CallFunction(this, MirrorType.SCOPE_TYPE, Mirror);
if (frame) {
this.frame_index_ = frame.index_;
} else {
- this.frame_index_ = undefined;
+ this.frame_index_ = UNDEFINED;
}
this.scope_index_ = index;
- this.details_ = new ScopeDetails(frame, function, index, opt_details);
+ this.details_ = new ScopeDetails(frame, fun, index, opt_details);
}
inherits(ScopeMirror, Mirror);
@@ -2313,7 +2354,7 @@ ScopeMirror.prototype.setVariableValue = function(name, new_value) {
* @extends Mirror
*/
function ScriptMirror(script) {
- %_CallFunction(this, SCRIPT_TYPE, Mirror);
+ %_CallFunction(this, MirrorType.SCRIPT_TYPE, Mirror);
this.script_ = script;
this.context_ = new ContextMirror(script.context_data);
this.allocateHandle_();
@@ -2434,7 +2475,7 @@ ScriptMirror.prototype.toText = function() {
* @extends Mirror
*/
function ContextMirror(data) {
- %_CallFunction(this, CONTEXT_TYPE, Mirror);
+ %_CallFunction(this, MirrorType.CONTEXT_TYPE, Mirror);
this.data_ = data;
this.allocateHandle_();
}
@@ -2564,30 +2605,30 @@ JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ =
o.ref = mirror.handle();
o.type = mirror.type();
switch (mirror.type()) {
- case UNDEFINED_TYPE:
- case NULL_TYPE:
- case BOOLEAN_TYPE:
- case NUMBER_TYPE:
+ case MirrorType.UNDEFINED_TYPE:
+ case MirrorType.NULL_TYPE:
+ case MirrorType.BOOLEAN_TYPE:
+ case MirrorType.NUMBER_TYPE:
o.value = mirror.value();
break;
- case STRING_TYPE:
+ case MirrorType.STRING_TYPE:
o.value = mirror.getTruncatedValue(this.maxStringLength_());
break;
- case SYMBOL_TYPE:
+ case MirrorType.SYMBOL_TYPE:
o.description = mirror.description();
break;
- case FUNCTION_TYPE:
+ case MirrorType.FUNCTION_TYPE:
o.name = mirror.name();
o.inferredName = mirror.inferredName();
if (mirror.script()) {
o.scriptId = mirror.script().id();
}
break;
- case ERROR_TYPE:
- case REGEXP_TYPE:
+ case MirrorType.ERROR_TYPE:
+ case MirrorType.REGEXP_TYPE:
o.value = mirror.toText();
break;
- case OBJECT_TYPE:
+ case MirrorType.OBJECT_TYPE:
o.className = mirror.className();
break;
}
@@ -2621,22 +2662,22 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
content.type = mirror.type();
switch (mirror.type()) {
- case UNDEFINED_TYPE:
- case NULL_TYPE:
+ case MirrorType.UNDEFINED_TYPE:
+ case MirrorType.NULL_TYPE:
// Undefined and null are represented just by their type.
break;
- case BOOLEAN_TYPE:
+ case MirrorType.BOOLEAN_TYPE:
// Boolean values are simply represented by their value.
content.value = mirror.value();
break;
- case NUMBER_TYPE:
+ case MirrorType.NUMBER_TYPE:
// Number values are simply represented by their value.
content.value = NumberToJSON_(mirror.value());
break;
- case STRING_TYPE:
+ case MirrorType.STRING_TYPE:
// String values might have their value cropped to keep down size.
if (this.maxStringLength_() != -1 &&
mirror.length() > this.maxStringLength_()) {
@@ -2650,36 +2691,37 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
content.length = mirror.length();
break;
- case SYMBOL_TYPE:
+ case MirrorType.SYMBOL_TYPE:
content.description = mirror.description();
break;
- case OBJECT_TYPE:
- case FUNCTION_TYPE:
- case ERROR_TYPE:
- case REGEXP_TYPE:
- case PROMISE_TYPE:
- case GENERATOR_TYPE:
+ case MirrorType.OBJECT_TYPE:
+ case MirrorType.FUNCTION_TYPE:
+ case MirrorType.ERROR_TYPE:
+ case MirrorType.REGEXP_TYPE:
+ case MirrorType.PROMISE_TYPE:
+ case MirrorType.GENERATOR_TYPE:
// Add object representation.
this.serializeObject_(mirror, content, details);
break;
- case PROPERTY_TYPE:
- case INTERNAL_PROPERTY_TYPE:
- throw new Error('PropertyMirror cannot be serialized independently');
+ case MirrorType.PROPERTY_TYPE:
+ case MirrorType.INTERNAL_PROPERTY_TYPE:
+ throw MakeError(kDebugger,
+ 'PropertyMirror cannot be serialized independently');
break;
- case FRAME_TYPE:
+ case MirrorType.FRAME_TYPE:
// Add object representation.
this.serializeFrame_(mirror, content);
break;
- case SCOPE_TYPE:
+ case MirrorType.SCOPE_TYPE:
// Add object representation.
this.serializeScope_(mirror, content);
break;
- case SCRIPT_TYPE:
+ case MirrorType.SCRIPT_TYPE:
// Script is represented by id, name and source attributes.
if (mirror.name()) {
content.name = mirror.name();
@@ -2720,7 +2762,7 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
}
break;
- case CONTEXT_TYPE:
+ case MirrorType.CONTEXT_TYPE:
content.data = mirror.data();
break;
}
@@ -2818,7 +2860,7 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
// Add actual properties - named properties followed by indexed properties.
var propertyNames = mirror.propertyNames(PropertyKind.Named);
var propertyIndexes = mirror.propertyNames(PropertyKind.Indexed);
- var p = new Array(propertyNames.length + propertyIndexes.length);
+ var p = new GlobalArray(propertyNames.length + propertyIndexes.length);
for (var i = 0; i < propertyNames.length; i++) {
var propertyMirror = mirror.property(propertyNames[i]);
p[i] = this.serializeProperty_(propertyMirror);
@@ -2949,7 +2991,7 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
content.returnValue = this.serializeReference(mirror.returnValue());
}
content.debuggerFrame = mirror.isDebuggerFrame();
- var x = new Array(mirror.argumentCount());
+ var x = new GlobalArray(mirror.argumentCount());
for (var i = 0; i < mirror.argumentCount(); i++) {
var arg = {};
var argument_name = mirror.argumentName(i);
@@ -2960,7 +3002,7 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
x[i] = arg;
}
content.arguments = x;
- var x = new Array(mirror.localCount());
+ var x = new GlobalArray(mirror.localCount());
for (var i = 0; i < mirror.localCount(); i++) {
var local = {};
local.name = mirror.localName(i);
@@ -3005,7 +3047,7 @@ JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
* @returns {number|string} Protocol value.
*/
function NumberToJSON_(value) {
- if (isNaN(value)) {
+ if (IsNaN(value)) {
return 'NaN';
}
if (!NUMBER_IS_FINITE(value)) {
@@ -3017,3 +3059,58 @@ function NumberToJSON_(value) {
}
return value;
}
+
+// ----------------------------------------------------------------------------
+// Exports
+
+utils.InstallFunctions(global, DONT_ENUM, [
+ "MakeMirror", MakeMirror,
+ "MakeMirrorSerializer", MakeMirrorSerializer,
+ "LookupMirror", LookupMirror,
+ "ToggleMirrorCache", ToggleMirrorCache,
+ "MirrorCacheIsEmpty", MirrorCacheIsEmpty,
+]);
+
+utils.InstallConstants(global, [
+ "ScopeType", ScopeType,
+ "PropertyKind", PropertyKind,
+ "PropertyType", PropertyType,
+ "PropertyAttribute", PropertyAttribute,
+ "Mirror", Mirror,
+ "ValueMirror", ValueMirror,
+ "UndefinedMirror", UndefinedMirror,
+ "NullMirror", NullMirror,
+ "BooleanMirror", BooleanMirror,
+ "NumberMirror", NumberMirror,
+ "StringMirror", StringMirror,
+ "SymbolMirror", SymbolMirror,
+ "ObjectMirror", ObjectMirror,
+ "FunctionMirror", FunctionMirror,
+ "UnresolvedFunctionMirror", UnresolvedFunctionMirror,
+ "ArrayMirror", ArrayMirror,
+ "DateMirror", DateMirror,
+ "RegExpMirror", RegExpMirror,
+ "ErrorMirror", ErrorMirror,
+ "PromiseMirror", PromiseMirror,
+ "MapMirror", MapMirror,
+ "SetMirror", SetMirror,
+ "IteratorMirror", IteratorMirror,
+ "GeneratorMirror", GeneratorMirror,
+ "PropertyMirror", PropertyMirror,
+ "InternalPropertyMirror", InternalPropertyMirror,
+ "FrameMirror", FrameMirror,
+ "ScriptMirror", ScriptMirror,
+ "ScopeMirror", ScopeMirror,
+ "FrameDetails", FrameDetails,
+]);
+
+// Functions needed by the debugger runtime.
+utils.InstallFunctions(utils, DONT_ENUM, [
+ "ClearMirrorCache", ClearMirrorCache
+]);
+
+// Export to debug.js
+utils.Export(function(to) {
+ to.MirrorType = MirrorType;
+});
+})
diff --git a/deps/v8/src/debug/ppc/OWNERS b/deps/v8/src/debug/ppc/OWNERS
new file mode 100644
index 0000000000..eb007cb908
--- /dev/null
+++ b/deps/v8/src/debug/ppc/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
new file mode 100644
index 0000000000..ed4a632475
--- /dev/null
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -0,0 +1,157 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void EmitDebugBreakSlot(MacroAssembler* masm) {
+ Label check_size;
+ __ bind(&check_size);
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+ }
+ DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
+ masm->InstructionsGeneratedSince(&check_size));
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
+ int call_argc) {
+ // Generate enough nop's to make space for a call instruction. Avoid emitting
+ // the trampoline pool in the debug break slot code.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+ masm->RecordDebugBreakSlot(mode, call_argc);
+ EmitDebugBreakSlot(masm);
+}
+
+
+void DebugCodegen::ClearDebugBreakSlot(Address pc) {
+ CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ EmitDebugBreakSlot(patcher.masm());
+}
+
+
+void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+ DCHECK_EQ(Code::BUILTIN, code->kind());
+ CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ // Patch the code changing the debug break slot code from
+ //
+ // ori r3, r3, 0
+ // ori r3, r3, 0
+ // ori r3, r3, 0
+ // ori r3, r3, 0
+ // ori r3, r3, 0
+ //
+ // to a call to the debug break code, using a FIXED_SEQUENCE.
+ //
+ // mov r0, <address>
+ // mtlr r0
+ // blrl
+ //
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
+ patcher.masm()->mov(v8::internal::r0,
+ Operand(reinterpret_cast<intptr_t>(code->entry())));
+ patcher.masm()->mtctr(v8::internal::r0);
+ patcher.masm()->bctrl();
+}
+
+
+void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
+ DebugBreakCallHelperMode mode) {
+ __ RecordComment("Debug break");
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
+ for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+ __ push(ip);
+ }
+ __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
+ __ push(ip);
+
+ if (mode == SAVE_RESULT_REGISTER) __ push(r3);
+
+ __ mov(r3, Operand::Zero()); // no arguments
+ __ mov(r4,
+ Operand(ExternalReference(
+ Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
+
+ CEntryStub ceb(masm->isolate(), 1);
+ __ CallStub(&ceb);
+
+ if (FLAG_debug_code) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ Register reg = {JSCallerSavedCode(i)};
+ __ mov(reg, Operand(kDebugZapValue));
+ }
+ }
+
+ if (mode == SAVE_RESULT_REGISTER) __ pop(r3);
+
+ // Don't bother removing padding bytes pushed on the stack
+ // as the frame is going to be restored right away.
+
+ // Leave the internal frame.
+ }
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ mov(ip, Operand(after_break_target));
+ __ LoadP(ip, MemOperand(ip));
+ __ JumpToJSEntry(ip);
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ __ Ret();
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ __ mov(ip, Operand(restarter_frame_function_slot));
+ __ li(r4, Operand::Zero());
+ __ StoreP(r4, MemOperand(ip, 0));
+
+ // Load the function pointer off of our current stack frame.
+ __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset -
+ kPointerSize));
+
+ // Pop return address and frame
+ __ LeaveFrame(StackFrame::INTERNAL);
+
+ // Load context from the function.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Re-run JSFunction, r4 is function, cp is context.
+ __ Jump(ip);
+}
+
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
new file mode 100644
index 0000000000..3b65678709
--- /dev/null
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -0,0 +1,146 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void EmitDebugBreakSlot(MacroAssembler* masm) {
+ Label check_codesize;
+ __ bind(&check_codesize);
+ __ Nop(Assembler::kDebugBreakSlotLength);
+ DCHECK_EQ(Assembler::kDebugBreakSlotLength,
+ masm->SizeOfCodeGeneratedSince(&check_codesize));
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
+ int call_argc) {
+ // Generate enough nop's to make space for a call instruction.
+ masm->RecordDebugBreakSlot(mode, call_argc);
+ EmitDebugBreakSlot(masm);
+}
+
+
+void DebugCodegen::ClearDebugBreakSlot(Address pc) {
+ CodePatcher patcher(pc, Assembler::kDebugBreakSlotLength);
+ EmitDebugBreakSlot(patcher.masm());
+}
+
+
+void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+ DCHECK_EQ(Code::BUILTIN, code->kind());
+ static const int kSize = Assembler::kDebugBreakSlotLength;
+ CodePatcher patcher(pc, kSize);
+ Label check_codesize;
+ patcher.masm()->bind(&check_codesize);
+ patcher.masm()->movp(kScratchRegister, reinterpret_cast<void*>(code->entry()),
+ Assembler::RelocInfoNone());
+ patcher.masm()->call(kScratchRegister);
+ // Check that the size of the code generated is as expected.
+ DCHECK_EQ(kSize, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
+}
+
+
+void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
+ DebugBreakCallHelperMode mode) {
+ __ RecordComment("Debug break");
+
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+ __ Push(Smi::FromInt(LiveEdit::kFramePaddingValue));
+ }
+ __ Push(Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
+
+ if (mode == SAVE_RESULT_REGISTER) __ Push(rax);
+
+ __ Set(rax, 0); // No arguments (argc == 0).
+ __ Move(rbx, ExternalReference(Runtime::FunctionForId(Runtime::kDebugBreak),
+ masm->isolate()));
+
+ CEntryStub ceb(masm->isolate(), 1);
+ __ CallStub(&ceb);
+
+ if (FLAG_debug_code) {
+ for (int i = 0; i < kNumJSCallerSaved; ++i) {
+ Register reg = {JSCallerSavedCode(i)};
+ __ Set(reg, kDebugZapValue);
+ }
+ }
+
+ if (mode == SAVE_RESULT_REGISTER) __ Pop(rax);
+
+ // Read current padding counter and skip corresponding number of words.
+ __ Pop(kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, kScratchRegister);
+ __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
+
+ // Get rid of the internal frame.
+ }
+
+ // This call did not replace a call , so there will be an unwanted
+ // return address left on the stack. Here we get rid of that.
+ __ addp(rsp, Immediate(kPCOnStackSize));
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ Move(kScratchRegister, after_break_target);
+ __ Jump(Operand(kScratchRegister, 0));
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ masm->ret(0);
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ __ Move(rax, restarter_frame_function_slot);
+ __ movp(Operand(rax, 0), Immediate(0));
+
+ // We do not know our frame height, but set rsp based on rbp.
+ __ leap(rsp, Operand(rbp, -1 * kPointerSize));
+
+ __ Pop(rdi); // Function.
+ __ popq(rbp);
+
+ // Load context from the function.
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ leap(rdx, FieldOperand(rdx, Code::kHeaderSize));
+
+ // Re-run JSFunction, rdi is function, rsi is context.
+ __ jmp(rdx);
+}
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/debug/x87/OWNERS b/deps/v8/src/debug/x87/OWNERS
new file mode 100644
index 0000000000..dd9998b261
--- /dev/null
+++ b/deps/v8/src/debug/x87/OWNERS
@@ -0,0 +1 @@
+weiliang.lin@intel.com
diff --git a/deps/v8/src/debug/x87/debug-x87.cc b/deps/v8/src/debug/x87/debug-x87.cc
new file mode 100644
index 0000000000..5ec608a99a
--- /dev/null
+++ b/deps/v8/src/debug/x87/debug-x87.cc
@@ -0,0 +1,145 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/x87/frames-x87.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void EmitDebugBreakSlot(MacroAssembler* masm) {
+ Label check_codesize;
+ __ bind(&check_codesize);
+ __ Nop(Assembler::kDebugBreakSlotLength);
+ DCHECK_EQ(Assembler::kDebugBreakSlotLength,
+ masm->SizeOfCodeGeneratedSince(&check_codesize));
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
+ int call_argc) {
+ // Generate enough nop's to make space for a call instruction.
+ masm->RecordDebugBreakSlot(mode, call_argc);
+ EmitDebugBreakSlot(masm);
+}
+
+
+void DebugCodegen::ClearDebugBreakSlot(Address pc) {
+ CodePatcher patcher(pc, Assembler::kDebugBreakSlotLength);
+ EmitDebugBreakSlot(patcher.masm());
+}
+
+
+void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+ DCHECK_EQ(Code::BUILTIN, code->kind());
+ static const int kSize = Assembler::kDebugBreakSlotLength;
+ CodePatcher patcher(pc, kSize);
+
+ // Add a label for checking the size of the code used for returning.
+ Label check_codesize;
+ patcher.masm()->bind(&check_codesize);
+ patcher.masm()->call(code->entry(), RelocInfo::NONE32);
+ // Check that the size of the code generated is as expected.
+ DCHECK_EQ(kSize, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
+}
+
+
+void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
+ DebugBreakCallHelperMode mode) {
+ __ RecordComment("Debug break");
+
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Load padding words on stack.
+ for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+ __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue)));
+ }
+ __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
+
+ if (mode == SAVE_RESULT_REGISTER) __ push(eax);
+
+ __ Move(eax, Immediate(0)); // No arguments.
+ __ mov(ebx,
+ Immediate(ExternalReference(
+ Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
+
+ CEntryStub ceb(masm->isolate(), 1);
+ __ CallStub(&ceb);
+
+ if (FLAG_debug_code) {
+ for (int i = 0; i < kNumJSCallerSaved; ++i) {
+ Register reg = {JSCallerSavedCode(i)};
+ __ Move(reg, Immediate(kDebugZapValue));
+ }
+ }
+
+ if (mode == SAVE_RESULT_REGISTER) __ pop(eax);
+
+ __ pop(ebx);
+ // We divide stored value by 2 (untagging) and multiply it by word's size.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
+ __ lea(esp, Operand(esp, ebx, times_half_pointer_size, 0));
+
+ // Get rid of the internal frame.
+ }
+
+ // This call did not replace a call , so there will be an unwanted
+ // return address left on the stack. Here we get rid of that.
+ __ add(esp, Immediate(kPointerSize));
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ ExternalReference after_break_target =
+ ExternalReference::debug_after_break_target_address(masm->isolate());
+ __ jmp(Operand::StaticVariable(after_break_target));
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ masm->ret(0);
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference::debug_restarter_frame_function_pointer_address(
+ masm->isolate());
+ __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
+
+ // We do not know our frame height, but set esp based on ebp.
+ __ lea(esp, Operand(ebp, -1 * kPointerSize));
+
+ __ pop(edi); // Function.
+ __ pop(ebp);
+
+ // Load context from the function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+
+ // Re-run JSFunction, edi is function, esi is context.
+ __ jmp(edx);
+}
+
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 684a37ff87..0efc4de369 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -9,7 +9,8 @@
#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/disasm.h"
-#include "src/full-codegen.h"
+#include "src/frames-inl.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
#include "src/macro-assembler.h"
#include "src/prettyprinter.h"
@@ -154,8 +155,11 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
// Always use the actual stack slots when calculating the fp to sp
// delta adding two for the function and context.
unsigned stack_slots = code->stack_slots();
+ unsigned arguments_stack_height =
+ Deoptimizer::ComputeOutgoingArgumentSize(code, deoptimization_index);
unsigned fp_to_sp_delta = (stack_slots * kPointerSize) +
- StandardFrameConstants::kFixedFrameSizeFromFp;
+ StandardFrameConstants::kFixedFrameSizeFromFp +
+ arguments_stack_height;
Deoptimizer* deoptimizer = new Deoptimizer(isolate,
function,
@@ -414,11 +418,9 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
PatchCodeForDeoptimization(isolate, codes[i]);
// We might be in the middle of incremental marking with compaction.
- // Ignore all slots that might have been recorded in the body of the
- // deoptimized code object.
- Code* code = codes[i];
- isolate->heap()->mark_compact_collector()->RemoveObjectSlots(
- code->instruction_start(), code->address() + code->Size());
+ // Tell collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
}
}
@@ -1221,6 +1223,12 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "code object\n");
+ // The allocation site.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(isolate_->heap()->undefined_value());
+ output_frame->SetFrameSlot(output_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_offset, "allocation site\n");
+
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
@@ -1230,6 +1238,12 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
PrintF(trace_scope_->file(), "(%d)\n", height - 1);
}
+ // The original constructor.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(isolate_->heap()->undefined_value());
+ output_frame->SetFrameSlot(output_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_offset, "new.target\n");
+
// The newly allocated object was passed as receiver in the artificial
// constructor stub environment created by HEnvironment::CopyForInlining().
output_offset -= kPointerSize;
@@ -1411,6 +1425,9 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// reg = saved frame
// reg = JSFunction context
//
+ // Caller stack params contain the register parameters to the stub first,
+ // and then, if the descriptor specifies a constant number of stack
+ // parameters, the stack parameters as well.
TranslatedFrame* translated_frame =
&(translated_state_.frames()[frame_index]);
@@ -1426,11 +1443,12 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// object to the callee and optionally the space to pass the argument
// object to the stub failure handler.
int param_count = descriptor.GetRegisterParameterCount();
+ int stack_param_count = descriptor.GetStackParameterCount();
CHECK_EQ(translated_frame->height(), param_count);
CHECK_GE(param_count, 0);
- int height_in_bytes = kPointerSize * param_count + sizeof(Arguments) +
- kPointerSize;
+ int height_in_bytes = kPointerSize * (param_count + stack_param_count) +
+ sizeof(Arguments) + kPointerSize;
int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
int input_frame_size = input_->GetFrameSize();
int output_frame_size = height_in_bytes + fixed_frame_size;
@@ -1503,7 +1521,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
"function (stub failure sentinel)\n");
- intptr_t caller_arg_count = 0;
+ intptr_t caller_arg_count = stack_param_count;
bool arg_count_known = !descriptor.stack_parameter_count().is_valid();
// Build the Arguments object for the caller's parameters and a pointer to it.
@@ -1551,6 +1569,20 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
}
}
+ // Copy constant stack parameters to the failure frame. If the number of stack
+ // parameters is not known in the descriptor, the arguments object is the way
+ // to access them.
+ for (int i = 0; i < stack_param_count; i++) {
+ output_frame_offset -= kPointerSize;
+ Object** stack_parameter = reinterpret_cast<Object**>(
+ frame_ptr + StandardFrameConstants::kCallerSPOffset +
+ (stack_param_count - i - 1) * kPointerSize);
+ value = reinterpret_cast<intptr_t>(*stack_parameter);
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_frame_offset,
+ "stack parameter\n");
+ }
+
CHECK_EQ(0u, output_frame_offset);
if (!arg_count_known) {
@@ -1580,8 +1612,8 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// Compute this frame's PC, state, and continuation.
Code* trampoline = NULL;
StubFunctionMode function_mode = descriptor.function_mode();
- StubFailureTrampolineStub(isolate_,
- function_mode).FindCodeInCache(&trampoline);
+ StubFailureTrampolineStub(isolate_, function_mode)
+ .FindCodeInCache(&trampoline);
DCHECK(trampoline != NULL);
output_frame->SetPc(reinterpret_cast<intptr_t>(
trampoline->instruction_start()));
@@ -1742,7 +1774,8 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
StandardFrameConstants::kFixedFrameSizeFromFp;
if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
unsigned stack_slots = compiled_code_->stack_slots();
- unsigned outgoing_size = ComputeOutgoingArgumentSize();
+ unsigned outgoing_size =
+ ComputeOutgoingArgumentSize(compiled_code_, bailout_id_);
CHECK(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
}
return result;
@@ -1770,10 +1803,12 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
}
-unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
+// static
+unsigned Deoptimizer::ComputeOutgoingArgumentSize(Code* code,
+ unsigned bailout_id) {
DeoptimizationInputData* data =
- DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
- unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ unsigned height = data->ArgumentsStackHeight(bailout_id)->value();
return height * kPointerSize;
}
@@ -2238,7 +2273,12 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
source_position_ = code->SourcePosition(pc);
for (int i = 0; i < expression_count_; i++) {
- SetExpression(i, output_frame->GetExpression(i));
+ Object* value = output_frame->GetExpression(i);
+ // Replace materialization markers with the undefined value.
+ if (value == deoptimizer->isolate()->heap()->arguments_marker()) {
+ value = deoptimizer->isolate()->heap()->undefined_value();
+ }
+ SetExpression(i, value);
}
if (has_arguments_adaptor) {
@@ -2249,7 +2289,12 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
parameters_count_ = output_frame->ComputeParametersCount();
parameters_ = new Object* [parameters_count_];
for (int i = 0; i < parameters_count_; i++) {
- SetParameter(i, output_frame->GetParameter(i));
+ Object* value = output_frame->GetParameter(i);
+ // Replace materialization markers with the undefined value.
+ if (value == deoptimizer->isolate()->heap()->arguments_marker()) {
+ value = deoptimizer->isolate()->heap()->undefined_value();
+ }
+ SetParameter(i, value);
}
}
@@ -2564,22 +2609,6 @@ int TranslatedValue::GetChildrenCount() const {
}
-int TranslatedState::SlotOffsetFp(int slot_index) {
- if (slot_index >= 0) {
- const int offset = StandardFrameConstants::kExpressionsOffset;
- return offset - (slot_index * kPointerSize);
- } else {
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return offset - ((slot_index + 1) * kPointerSize);
- }
-}
-
-
-Address TranslatedState::SlotAddress(Address fp, int slot_index) {
- return fp + SlotOffsetFp(slot_index);
-}
-
-
uint32_t TranslatedState::GetUInt32Slot(Address fp, int slot_offset) {
Address address = fp + slot_offset;
#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
@@ -2682,7 +2711,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
- SmartArrayPointer<char> name = shared_info->DebugName()->ToCString();
+ base::SmartArrayPointer<char> name =
+ shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading input frame %s", name.get());
int arg_count = shared_info->internal_formal_parameter_count() + 1;
PrintF(trace_file, " => node=%d, args=%d, height=%d; inputs:\n",
@@ -2696,7 +2726,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
- SmartArrayPointer<char> name = shared_info->DebugName()->ToCString();
+ base::SmartArrayPointer<char> name =
+ shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading arguments adaptor frame %s", name.get());
PrintF(trace_file, " => height=%d; inputs:\n", height);
}
@@ -2708,7 +2739,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
- SmartArrayPointer<char> name = shared_info->DebugName()->ToCString();
+ base::SmartArrayPointer<char> name =
+ shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading construct stub frame %s", name.get());
PrintF(trace_file, " => height=%d; inputs:\n", height);
}
@@ -2719,7 +2751,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
SharedFunctionInfo* shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
if (trace_file != nullptr) {
- SmartArrayPointer<char> name = shared_info->DebugName()->ToCString();
+ base::SmartArrayPointer<char> name =
+ shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading getter frame %s; inputs:\n", name.get());
}
return TranslatedFrame::AccessorFrame(TranslatedFrame::kGetter,
@@ -2730,7 +2763,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
SharedFunctionInfo* shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
if (trace_file != nullptr) {
- SmartArrayPointer<char> name = shared_info->DebugName()->ToCString();
+ base::SmartArrayPointer<char> name =
+ shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading setter frame %s; inputs:\n", name.get());
}
return TranslatedFrame::AccessorFrame(TranslatedFrame::kSetter,
@@ -2897,7 +2931,8 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
}
case Translation::STACK_SLOT: {
- int slot_offset = SlotOffsetFp(iterator->Next());
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
intptr_t value = *(reinterpret_cast<intptr_t*>(fp + slot_offset));
if (trace_file != nullptr) {
PrintF(trace_file, "0x%08" V8PRIxPTR " ; [fp %c %d] ", value,
@@ -2908,7 +2943,8 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
}
case Translation::INT32_STACK_SLOT: {
- int slot_offset = SlotOffsetFp(iterator->Next());
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
uint32_t value = GetUInt32Slot(fp, slot_offset);
if (trace_file != nullptr) {
PrintF(trace_file, "%d ; (int) [fp %c %d] ",
@@ -2919,7 +2955,8 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
}
case Translation::UINT32_STACK_SLOT: {
- int slot_offset = SlotOffsetFp(iterator->Next());
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
uint32_t value = GetUInt32Slot(fp, slot_offset);
if (trace_file != nullptr) {
PrintF(trace_file, "%u ; (uint) [fp %c %d] ", value,
@@ -2929,7 +2966,8 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
}
case Translation::BOOL_STACK_SLOT: {
- int slot_offset = SlotOffsetFp(iterator->Next());
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
uint32_t value = GetUInt32Slot(fp, slot_offset);
if (trace_file != nullptr) {
PrintF(trace_file, "%u ; (bool) [fp %c %d] ", value,
@@ -2939,7 +2977,8 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
}
case Translation::DOUBLE_STACK_SLOT: {
- int slot_offset = SlotOffsetFp(iterator->Next());
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
double value = ReadDoubleValue(fp + slot_offset);
if (trace_file != nullptr) {
PrintF(trace_file, "%e ; (double) [fp %c %d] ", value,
@@ -3165,7 +3204,7 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
}
case JS_OBJECT_TYPE: {
Handle<JSObject> object =
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED, false);
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED);
slot->value_ = object;
Handle<Object> properties = MaterializeAt(frame_index, value_index);
Handle<Object> elements = MaterializeAt(frame_index, value_index);
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index ab76d41b6b..b116ccd54d 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -5,8 +5,6 @@
#ifndef V8_DEOPTIMIZER_H_
#define V8_DEOPTIMIZER_H_
-#include "src/v8.h"
-
#include "src/allocation.h"
#include "src/macro-assembler.h"
@@ -278,8 +276,6 @@ class TranslatedState {
Handle<Object> MaterializeObjectAt(int object_index);
bool GetAdaptedArguments(Handle<JSObject>* result, int frame_index);
- static int SlotOffsetFp(int slot_index);
- static Address SlotAddress(Address fp, int slot_index);
static uint32_t GetUInt32Slot(Address fp, int slot_index);
std::vector<TranslatedFrame> frames_;
@@ -385,7 +381,8 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
V(kValueMismatch, "value mismatch") \
V(kWrongInstanceType, "wrong instance type") \
V(kWrongMap, "wrong map") \
- V(kUndefinedOrNullInForIn, "null or undefined in for-in")
+ V(kUndefinedOrNullInForIn, "null or undefined in for-in") \
+ V(kUndefinedOrNullInToObject, "null or undefined in ToObject")
class Deoptimizer : public Malloced {
@@ -615,7 +612,7 @@ class Deoptimizer : public Malloced {
unsigned ComputeFixedSize(JSFunction* function) const;
unsigned ComputeIncomingArgumentSize(JSFunction* function) const;
- unsigned ComputeOutgoingArgumentSize() const;
+ static unsigned ComputeOutgoingArgumentSize(Code* code, unsigned bailout_id);
Object* ComputeLiteral(int index) const;
diff --git a/deps/v8/src/disasm.h b/deps/v8/src/disasm.h
index 89b7fc2615..263aa317d5 100644
--- a/deps/v8/src/disasm.h
+++ b/deps/v8/src/disasm.h
@@ -5,6 +5,8 @@
#ifndef V8_DISASM_H_
#define V8_DISASM_H_
+#include "src/utils.h"
+
namespace disasm {
typedef unsigned char byte;
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 47e506d112..411b09fcaa 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/disassembler.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/disasm.h"
-#include "src/disassembler.h"
#include "src/macro-assembler.h"
#include "src/snapshot/serialize.h"
#include "src/string-stream.h"
@@ -182,7 +181,7 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
relocinfo.target_object()->ShortPrint(&accumulator);
- SmartArrayPointer<const char> obj_name = accumulator.ToCString();
+ base::SmartArrayPointer<const char> obj_name = accumulator.ToCString();
out.AddFormatted(" ;; object: %s", obj_name.get());
} else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
const char* reference_name = ref_encoder.NameOfAddress(
@@ -197,8 +196,8 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
Code::Kind kind = code->kind();
if (code->is_inline_cache_stub()) {
if (kind == Code::LOAD_IC &&
- LoadICState::GetContextualMode(code->extra_ic_state()) ==
- CONTEXTUAL) {
+ LoadICState::GetTypeofMode(code->extra_ic_state()) ==
+ NOT_INSIDE_TYPEOF) {
out.AddFormatted(" contextual,");
}
InlineCacheState ic_state = code->ic_state();
@@ -216,15 +215,7 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
DCHECK(major_key == CodeStub::MajorKeyFromKey(key));
out.AddFormatted(" %s, %s, ", Code::Kind2String(kind),
CodeStub::MajorName(major_key, false));
- switch (major_key) {
- case CodeStub::CallFunction: {
- int argc = CallFunctionStub::ExtractArgcFromMinorKey(minor_key);
- out.AddFormatted("argc = %d", argc);
- break;
- }
- default:
- out.AddFormatted("minor: %d", minor_key);
- }
+ out.AddFormatted("minor: %d", minor_key);
} else {
out.AddFormatted(" %s", Code::Kind2String(kind));
}
diff --git a/deps/v8/src/diy-fp.cc b/deps/v8/src/diy-fp.cc
index b64f3407f8..44a9bb122e 100644
--- a/deps/v8/src/diy-fp.cc
+++ b/deps/v8/src/diy-fp.cc
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <stdint.h>
-#include "src/base/logging.h"
#include "src/diy-fp.h"
-#include "src/globals.h"
+
+#include <stdint.h>
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/diy-fp.h b/deps/v8/src/diy-fp.h
index 31f787265e..e0daf27a1e 100644
--- a/deps/v8/src/diy-fp.h
+++ b/deps/v8/src/diy-fp.h
@@ -5,6 +5,10 @@
#ifndef V8_DIY_FP_H_
#define V8_DIY_FP_H_
+#include <stdint.h>
+
+#include "src/base/logging.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/effects.h b/deps/v8/src/effects.h
index 9481bb8875..e18baeafd6 100644
--- a/deps/v8/src/effects.h
+++ b/deps/v8/src/effects.h
@@ -5,8 +5,6 @@
#ifndef V8_EFFECTS_H_
#define V8_EFFECTS_H_
-#include "src/v8.h"
-
#include "src/types.h"
namespace v8 {
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index 758b80ddbd..0d29c30472 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -15,26 +15,17 @@ namespace internal {
int ElementsKindToShiftSize(ElementsKind elements_kind) {
switch (elements_kind) {
- case EXTERNAL_INT8_ELEMENTS:
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case INT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
return 0;
- case EXTERNAL_INT16_ELEMENTS:
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
case INT16_ELEMENTS:
return 1;
- case EXTERNAL_INT32_ELEMENTS:
- case EXTERNAL_UINT32_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
case UINT32_ELEMENTS:
case INT32_ELEMENTS:
case FLOAT32_ELEMENTS:
return 2;
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FLOAT64_ELEMENTS:
@@ -56,10 +47,8 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind) {
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
- if (IsExternalArrayElementsKind(elements_kind)) {
+ if (IsFixedTypedArrayElementsKind(elements_kind)) {
return 0;
- } else if (IsFixedTypedArrayElementsKind(elements_kind)) {
- return FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
} else {
return FixedArray::kHeaderSize - kHeapObjectTag;
}
@@ -121,23 +110,8 @@ int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
ElementsKind GetNextTransitionElementsKind(ElementsKind kind) {
- switch (kind) {
-#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: return EXTERNAL_##TYPE##_ELEMENTS;
-
- TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
-#undef FIXED_TYPED_ARRAY_CASE
- default: {
- int index = GetSequenceIndexFromFastElementsKind(kind);
- return GetFastElementsKindFromSequenceIndex(index + 1);
- }
- }
-}
-
-
-static bool IsTypedArrayElementsKind(ElementsKind elements_kind) {
- return IsFixedTypedArrayElementsKind(elements_kind) ||
- IsExternalArrayElementsKind(elements_kind);
+ int index = GetSequenceIndexFromFastElementsKind(kind);
+ return GetFastElementsKindFromSequenceIndex(index + 1);
}
@@ -148,18 +122,9 @@ static inline bool IsFastTransitionTarget(ElementsKind elements_kind) {
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind) {
- if (IsTypedArrayElementsKind(from_kind) ||
- IsTypedArrayElementsKind(to_kind)) {
- switch (from_kind) {
-#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- return to_kind == EXTERNAL_##TYPE##_ELEMENTS;
-
- TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE);
-#undef FIXED_TYPED_ARRAY_CASE
- default:
- return false;
- }
+ if (IsFixedTypedArrayElementsKind(from_kind) ||
+ IsFixedTypedArrayElementsKind(to_kind)) {
+ return false;
}
if (IsFastElementsKind(from_kind) && IsFastTransitionTarget(to_kind)) {
switch (from_kind) {
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index b7d169b82e..0254a4fb59 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -5,6 +5,7 @@
#ifndef V8_ELEMENTS_KIND_H_
#define V8_ELEMENTS_KIND_H_
+#include "src/base/macros.h"
#include "src/checks.h"
namespace v8 {
@@ -32,17 +33,6 @@ enum ElementsKind {
FAST_SLOPPY_ARGUMENTS_ELEMENTS,
SLOW_SLOPPY_ARGUMENTS_ELEMENTS,
- // The "fast" kind for external arrays
- EXTERNAL_INT8_ELEMENTS,
- EXTERNAL_UINT8_ELEMENTS,
- EXTERNAL_INT16_ELEMENTS,
- EXTERNAL_UINT16_ELEMENTS,
- EXTERNAL_INT32_ELEMENTS,
- EXTERNAL_UINT32_ELEMENTS,
- EXTERNAL_FLOAT32_ELEMENTS,
- EXTERNAL_FLOAT64_ELEMENTS,
- EXTERNAL_UINT8_CLAMPED_ELEMENTS,
-
// Fixed typed arrays
UINT8_ELEMENTS,
INT8_ELEMENTS,
@@ -59,8 +49,6 @@ enum ElementsKind {
LAST_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
- FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_INT8_ELEMENTS,
- LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_UINT8_CLAMPED_ELEMENTS,
FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS,
LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
@@ -96,26 +84,20 @@ inline bool IsSloppyArgumentsElements(ElementsKind kind) {
}
-inline bool IsExternalArrayElementsKind(ElementsKind kind) {
- return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
- kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
+inline bool IsFixedTypedArrayElementsKind(ElementsKind kind) {
+ return kind >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
+ kind <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND;
}
inline bool IsTerminalElementsKind(ElementsKind kind) {
return kind == TERMINAL_FAST_ELEMENTS_KIND ||
- IsExternalArrayElementsKind(kind);
-}
-
-
-inline bool IsFixedTypedArrayElementsKind(ElementsKind kind) {
- return kind >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
- kind <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND;
+ IsFixedTypedArrayElementsKind(kind);
}
inline bool IsFastElementsKind(ElementsKind kind) {
- DCHECK(FIRST_FAST_ELEMENTS_KIND == 0);
+ STATIC_ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
}
@@ -132,21 +114,13 @@ inline bool IsFastDoubleElementsKind(ElementsKind kind) {
}
-inline bool IsExternalFloatOrDoubleElementsKind(ElementsKind kind) {
- return kind == EXTERNAL_FLOAT64_ELEMENTS ||
- kind == EXTERNAL_FLOAT32_ELEMENTS;
-}
-
-
inline bool IsFixedFloatElementsKind(ElementsKind kind) {
return kind == FLOAT32_ELEMENTS || kind == FLOAT64_ELEMENTS;
}
inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
- return IsFastDoubleElementsKind(kind) ||
- IsExternalFloatOrDoubleElementsKind(kind) ||
- IsFixedFloatElementsKind(kind);
+ return IsFastDoubleElementsKind(kind) || IsFixedFloatElementsKind(kind);
}
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index e830d7c465..3e80d5570b 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -27,15 +27,6 @@
// - FastPackedDoubleElementsAccessor
// - FastHoleyDoubleElementsAccessor
// - TypedElementsAccessor: template, with instantiations:
-// - ExternalInt8ElementsAccessor
-// - ExternalUint8ElementsAccessor
-// - ExternalInt16ElementsAccessor
-// - ExternalUint16ElementsAccessor
-// - ExternalInt32ElementsAccessor
-// - ExternalUint32ElementsAccessor
-// - ExternalFloat32ElementsAccessor
-// - ExternalFloat64ElementsAccessor
-// - ExternalUint8ClampedElementsAccessor
// - FixedUint8ElementsAccessor
// - FixedInt8ElementsAccessor
// - FixedUint16ElementsAccessor
@@ -79,23 +70,6 @@ static const int kPackedSizeNotKnown = -1;
FixedArray) \
V(SlowSloppyArgumentsElementsAccessor, SLOW_SLOPPY_ARGUMENTS_ELEMENTS, \
FixedArray) \
- V(ExternalInt8ElementsAccessor, EXTERNAL_INT8_ELEMENTS, ExternalInt8Array) \
- V(ExternalUint8ElementsAccessor, EXTERNAL_UINT8_ELEMENTS, \
- ExternalUint8Array) \
- V(ExternalInt16ElementsAccessor, EXTERNAL_INT16_ELEMENTS, \
- ExternalInt16Array) \
- V(ExternalUint16ElementsAccessor, EXTERNAL_UINT16_ELEMENTS, \
- ExternalUint16Array) \
- V(ExternalInt32ElementsAccessor, EXTERNAL_INT32_ELEMENTS, \
- ExternalInt32Array) \
- V(ExternalUint32ElementsAccessor, EXTERNAL_UINT32_ELEMENTS, \
- ExternalUint32Array) \
- V(ExternalFloat32ElementsAccessor, EXTERNAL_FLOAT32_ELEMENTS, \
- ExternalFloat32Array) \
- V(ExternalFloat64ElementsAccessor, EXTERNAL_FLOAT64_ELEMENTS, \
- ExternalFloat64Array) \
- V(ExternalUint8ClampedElementsAccessor, EXTERNAL_UINT8_CLAMPED_ELEMENTS, \
- ExternalUint8ClampedArray) \
V(FixedUint8ElementsAccessor, UINT8_ELEMENTS, FixedUint8Array) \
V(FixedInt8ElementsAccessor, INT8_ELEMENTS, FixedInt8Array) \
V(FixedUint16ElementsAccessor, UINT16_ELEMENTS, FixedUint16Array) \
@@ -271,6 +245,7 @@ static void CopyDoubleToObjectElements(FixedArrayBase* from_base,
}
}
}
+
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
@@ -548,40 +523,25 @@ class ElementsAccessorBase : public ElementsAccessor {
*holder, *backing_store, index) != kMaxUInt32;
}
- virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t index,
- Handle<FixedArrayBase> backing_store) final {
- if (!IsExternalArrayElementsKind(ElementsTraits::Kind) &&
- FLAG_trace_js_array_abuse) {
- CheckArrayAbuse(holder, "elements read", index);
- }
-
- if (IsExternalArrayElementsKind(ElementsTraits::Kind) &&
- FLAG_trace_external_array_abuse) {
- CheckArrayAbuse(holder, "external elements read", index);
- }
-
- return ElementsAccessorSubclass::GetImpl(holder, index, backing_store);
+ virtual Handle<Object> Get(Handle<FixedArrayBase> backing_store,
+ uint32_t entry) final {
+ return ElementsAccessorSubclass::GetImpl(backing_store, entry);
}
- static Handle<Object> GetImpl(Handle<JSObject> obj, uint32_t index,
- Handle<FixedArrayBase> backing_store) {
- if (index <
- ElementsAccessorSubclass::GetCapacityImpl(*obj, *backing_store)) {
- return BackingStore::get(Handle<BackingStore>::cast(backing_store),
- index);
- } else {
- return backing_store->GetIsolate()->factory()->the_hole_value();
- }
+ static Handle<Object> GetImpl(Handle<FixedArrayBase> backing_store,
+ uint32_t entry) {
+ uint32_t index = GetIndexForEntryImpl(*backing_store, entry);
+ return BackingStore::get(Handle<BackingStore>::cast(backing_store), index);
}
- virtual void Set(FixedArrayBase* backing_store, uint32_t index,
+ virtual void Set(FixedArrayBase* backing_store, uint32_t entry,
Object* value) final {
- ElementsAccessorSubclass::SetImpl(backing_store, index, value);
+ ElementsAccessorSubclass::SetImpl(backing_store, entry, value);
}
- static void SetImpl(FixedArrayBase* backing_store, uint32_t index,
+ static void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
Object* value) {
- BackingStore::cast(backing_store)->SetValue(index, value);
+ BackingStore::cast(backing_store)->SetValue(entry, value);
}
virtual void Reconfigure(Handle<JSObject> object,
@@ -599,19 +559,33 @@ class ElementsAccessorBase : public ElementsAccessor {
UNREACHABLE();
}
- virtual void Add(Handle<JSObject> object, uint32_t entry,
+ virtual void Add(Handle<JSObject> object, uint32_t index,
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) final {
- ElementsAccessorSubclass::AddImpl(object, entry, value, attributes,
+ ElementsAccessorSubclass::AddImpl(object, index, value, attributes,
new_capacity);
}
- static void AddImpl(Handle<JSObject> object, uint32_t entry,
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) {
UNREACHABLE();
}
+ virtual uint32_t Push(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store, Object** objects,
+ uint32_t push_size, int direction) {
+ return ElementsAccessorSubclass::PushImpl(receiver, backing_store, objects,
+ push_size, direction);
+ }
+
+ static uint32_t PushImpl(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> elms_obj, Object** objects,
+ uint32_t push_size, int direction) {
+ UNREACHABLE();
+ return 0;
+ }
+
virtual void SetLength(Handle<JSArray> array, uint32_t length) final {
ElementsAccessorSubclass::SetLengthImpl(array, length,
handle(array->elements()));
@@ -758,10 +732,7 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t extra = 0;
for (uint32_t y = 0; y < len1; y++) {
if (ElementsAccessorSubclass::HasEntryImpl(*from, y)) {
- uint32_t index =
- ElementsAccessorSubclass::GetIndexForEntryImpl(*from, y);
- Handle<Object> value =
- ElementsAccessorSubclass::GetImpl(receiver, index, from);
+ Handle<Object> value = ElementsAccessorSubclass::GetImpl(from, y);
DCHECK(!value->IsTheHole());
DCHECK(!value->IsAccessorPair());
@@ -794,10 +765,7 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t entry = 0;
for (uint32_t y = 0; y < len1; y++) {
if (ElementsAccessorSubclass::HasEntryImpl(*from, y)) {
- uint32_t index =
- ElementsAccessorSubclass::GetIndexForEntryImpl(*from, y);
- Handle<Object> value =
- ElementsAccessorSubclass::GetImpl(receiver, index, from);
+ Handle<Object> value = ElementsAccessorSubclass::GetImpl(from, y);
DCHECK(!value->IsAccessorPair());
DCHECK(!value->IsExecutableAccessorInfo());
if (filter == FixedArray::NON_SYMBOL_KEYS && value->IsSymbol()) {
@@ -834,11 +802,17 @@ class ElementsAccessorBase : public ElementsAccessor {
static uint32_t GetEntryForIndexImpl(JSObject* holder,
FixedArrayBase* backing_store,
uint32_t index) {
- return index < ElementsAccessorSubclass::GetCapacityImpl(holder,
- backing_store) &&
- !BackingStore::cast(backing_store)->is_the_hole(index)
- ? index
- : kMaxUInt32;
+ if (IsHoleyElementsKind(kind())) {
+ return index < ElementsAccessorSubclass::GetCapacityImpl(holder,
+ backing_store) &&
+ !BackingStore::cast(backing_store)->is_the_hole(index)
+ ? index
+ : kMaxUInt32;
+ } else {
+ Smi* smi_length = Smi::cast(JSArray::cast(holder)->length());
+ uint32_t length = static_cast<uint32_t>(smi_length->value());
+ return index < length ? index : kMaxUInt32;
+ }
}
virtual uint32_t GetEntryForIndex(JSObject* holder,
@@ -945,22 +919,18 @@ class DictionaryElementsAccessor
obj->set_elements(*new_elements);
}
- static Handle<Object> GetImpl(Handle<JSObject> obj, uint32_t index,
- Handle<FixedArrayBase> store) {
- Handle<SeededNumberDictionary> backing_store =
- Handle<SeededNumberDictionary>::cast(store);
- Isolate* isolate = backing_store->GetIsolate();
- int entry = backing_store->FindEntry(index);
- if (entry != SeededNumberDictionary::kNotFound) {
- return handle(backing_store->ValueAt(entry), isolate);
- }
- return isolate->factory()->the_hole_value();
+ static Object* GetRaw(FixedArrayBase* store, uint32_t entry) {
+ SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
+ return backing_store->ValueAt(entry);
}
- static void SetImpl(FixedArrayBase* store, uint32_t index, Object* value) {
+ static Handle<Object> GetImpl(Handle<FixedArrayBase> store, uint32_t entry) {
+ Isolate* isolate = store->GetIsolate();
+ return handle(GetRaw(*store, entry), isolate);
+ }
+
+ static void SetImpl(FixedArrayBase* store, uint32_t entry, Object* value) {
SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(store);
- int entry = dictionary->FindEntry(index);
- DCHECK_NE(SeededNumberDictionary::kNotFound, entry);
dictionary->ValueAtPut(entry, value);
}
@@ -969,7 +939,7 @@ class DictionaryElementsAccessor
Handle<Object> value,
PropertyAttributes attributes) {
SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(*store);
- if (attributes != NONE) dictionary->set_requires_slow_elements();
+ if (attributes != NONE) object->RequireSlowElements(dictionary);
dictionary->ValueAtPut(entry, *value);
PropertyDetails details = dictionary->DetailsAt(entry);
details = PropertyDetails(attributes, DATA, details.dictionary_index(),
@@ -977,7 +947,7 @@ class DictionaryElementsAccessor
dictionary->DetailsAtPut(entry, details);
}
- static void AddImpl(Handle<JSObject> object, uint32_t entry,
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) {
PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
@@ -986,9 +956,10 @@ class DictionaryElementsAccessor
? JSObject::NormalizeElements(object)
: handle(SeededNumberDictionary::cast(object->elements()));
Handle<SeededNumberDictionary> new_dictionary =
- SeededNumberDictionary::AddNumberEntry(dictionary, entry, value,
- details);
- if (attributes != NONE) new_dictionary->set_requires_slow_elements();
+ SeededNumberDictionary::AddNumberEntry(
+ dictionary, index, value, details,
+ object->map()->is_prototype_map());
+ if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
if (dictionary.is_identical_to(new_dictionary)) return;
object->set_elements(*new_dictionary);
}
@@ -1121,7 +1092,7 @@ class FastElementsAccessor
value, attributes);
}
- static void AddImpl(Handle<JSObject> object, uint32_t entry,
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) {
DCHECK_EQ(NONE, attributes);
@@ -1143,7 +1114,7 @@ class FastElementsAccessor
JSObject::EnsureWritableFastElements(object);
}
}
- FastElementsAccessorSubclass::SetImpl(object->elements(), entry, *value);
+ FastElementsAccessorSubclass::SetImpl(object->elements(), index, *value);
}
static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
@@ -1185,6 +1156,53 @@ class FastElementsAccessor
}
#endif
}
+
+ static uint32_t PushImpl(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
+ Object** objects, uint32_t push_size,
+ int direction) {
+ uint32_t len = Smi::cast(receiver->length())->value();
+ if (push_size == 0) {
+ return len;
+ }
+ uint32_t elms_len = backing_store->length();
+ // Currently fixed arrays cannot grow too big, so
+ // we should never hit this case.
+ DCHECK(push_size <= static_cast<uint32_t>(Smi::kMaxValue - len));
+ uint32_t new_length = len + push_size;
+ Handle<FixedArrayBase> new_elms;
+
+ if (new_length > elms_len) {
+ // New backing storage is needed.
+ uint32_t capacity = new_length + (new_length >> 1) + 16;
+ new_elms = FastElementsAccessorSubclass::ConvertElementsWithCapacity(
+ receiver, backing_store, KindTraits::Kind, capacity);
+ } else {
+ // push_size is > 0 and new_length <= elms_len, so backing_store cannot be
+ // the
+ // empty_fixed_array.
+ new_elms = backing_store;
+ }
+
+ // Add the provided values.
+ DisallowHeapAllocation no_gc;
+ DCHECK(direction == ElementsAccessor::kDirectionForward ||
+ direction == ElementsAccessor::kDirectionReverse);
+ STATIC_ASSERT(ElementsAccessor::kDirectionForward == 1);
+ STATIC_ASSERT(ElementsAccessor::kDirectionReverse == -1);
+ for (uint32_t index = 0; index < push_size; index++) {
+ int offset = direction * index;
+ Object* object = objects[offset];
+ FastElementsAccessorSubclass::SetImpl(*new_elms, index + len, object);
+ }
+ if (!new_elms.is_identical_to(backing_store)) {
+ receiver->set_elements(*new_elms);
+ }
+ DCHECK(*new_elms == receiver->elements());
+ // Set the length.
+ receiver->set_length(Smi::FromInt(new_length));
+ return new_length;
+ }
};
@@ -1197,6 +1215,12 @@ class FastSmiOrObjectElementsAccessor
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits>(name) {}
+ static Object* GetRaw(FixedArray* backing_store, uint32_t entry) {
+ uint32_t index = FastElementsAccessorSubclass::GetIndexForEntryImpl(
+ backing_store, entry);
+ return backing_store->get(index);
+ }
+
// NOTE: this method violates the handlified function signature convention:
// raw pointer parameters in the function that allocates.
// See ElementsAccessor::CopyElements() for details.
@@ -1231,7 +1255,6 @@ class FastSmiOrObjectElementsAccessor
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
case TYPE##_ELEMENTS: \
UNREACHABLE();
TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -1328,7 +1351,6 @@ class FastDoubleElementsAccessor
UNREACHABLE();
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
case TYPE##_ELEMENTS: \
UNREACHABLE();
TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -1375,14 +1397,10 @@ class TypedElementsAccessor
typedef typename ElementsKindTraits<Kind>::BackingStore BackingStore;
typedef TypedElementsAccessor<Kind> AccessorClass;
- static Handle<Object> GetImpl(Handle<JSObject> obj, uint32_t index,
- Handle<FixedArrayBase> backing_store) {
- if (index < AccessorClass::GetCapacityImpl(*obj, *backing_store)) {
- return BackingStore::get(Handle<BackingStore>::cast(backing_store),
- index);
- } else {
- return backing_store->GetIsolate()->factory()->undefined_value();
- }
+ static Handle<Object> GetImpl(Handle<FixedArrayBase> backing_store,
+ uint32_t entry) {
+ uint32_t index = GetIndexForEntryImpl(*backing_store, entry);
+ return BackingStore::get(Handle<BackingStore>::cast(backing_store), index);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
@@ -1400,6 +1418,11 @@ class TypedElementsAccessor
UNREACHABLE();
}
+ static uint32_t GetIndexForEntryImpl(FixedArrayBase* backing_store,
+ uint32_t entry) {
+ return entry;
+ }
+
static uint32_t GetEntryForIndexImpl(JSObject* holder,
FixedArrayBase* backing_store,
uint32_t index) {
@@ -1418,13 +1441,6 @@ class TypedElementsAccessor
-#define EXTERNAL_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype, size) \
- typedef TypedElementsAccessor<EXTERNAL_##TYPE##_ELEMENTS> \
- External##Type##ElementsAccessor;
-
-TYPED_ARRAYS(EXTERNAL_ELEMENTS_ACCESSOR)
-#undef EXTERNAL_ELEMENTS_ACCESSOR
-
#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype, size) \
typedef TypedElementsAccessor<TYPE##_ELEMENTS > \
Fixed##Type##ElementsAccessor;
@@ -1441,35 +1457,38 @@ class SloppyArgumentsElementsAccessor
public:
explicit SloppyArgumentsElementsAccessor(const char* name)
: ElementsAccessorBase<SloppyArgumentsElementsAccessorSubclass,
- KindTraits>(name) {}
+ KindTraits>(name) {
+ USE(KindTraits::Kind);
+ }
- static Handle<Object> GetImpl(Handle<JSObject> obj, uint32_t index,
- Handle<FixedArrayBase> parameters) {
- Isolate* isolate = obj->GetIsolate();
+ static Handle<Object> GetImpl(Handle<FixedArrayBase> parameters,
+ uint32_t entry) {
+ Isolate* isolate = parameters->GetIsolate();
Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters);
- Handle<Object> probe(GetParameterMapArg(*parameter_map, index), isolate);
- if (!probe->IsTheHole()) {
+ uint32_t length = parameter_map->length() - 2;
+ if (entry < length) {
DisallowHeapAllocation no_gc;
+ Object* probe = parameter_map->get(entry + 2);
Context* context = Context::cast(parameter_map->get(0));
- int context_entry = Handle<Smi>::cast(probe)->value();
+ int context_entry = Smi::cast(probe)->value();
DCHECK(!context->get(context_entry)->IsTheHole());
return handle(context->get(context_entry), isolate);
} else {
// Object is not mapped, defer to the arguments.
Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)),
isolate);
- Handle<Object> result = ArgumentsAccessor::GetImpl(obj, index, arguments);
+ Handle<Object> result =
+ ArgumentsAccessor::GetImpl(arguments, entry - length);
// Elements of the arguments object in slow mode might be slow aliases.
if (result->IsAliasedArgumentsEntry()) {
DisallowHeapAllocation no_gc;
- AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(*result);
+ AliasedArgumentsEntry* alias = AliasedArgumentsEntry::cast(*result);
Context* context = Context::cast(parameter_map->get(0));
- int context_entry = entry->aliased_context_slot();
+ int context_entry = alias->aliased_context_slot();
DCHECK(!context->get(context_entry)->IsTheHole());
return handle(context->get(context_entry), isolate);
- } else {
- return result;
}
+ return result;
}
}
@@ -1478,17 +1497,27 @@ class SloppyArgumentsElementsAccessor
UNREACHABLE();
}
- static void SetImpl(FixedArrayBase* store, uint32_t index, Object* value) {
+ static void SetImpl(FixedArrayBase* store, uint32_t entry, Object* value) {
FixedArray* parameter_map = FixedArray::cast(store);
- Object* probe = GetParameterMapArg(parameter_map, index);
- if (!probe->IsTheHole()) {
+ uint32_t length = parameter_map->length() - 2;
+ if (entry < length) {
+ Object* probe = parameter_map->get(entry + 2);
Context* context = Context::cast(parameter_map->get(0));
int context_entry = Smi::cast(probe)->value();
DCHECK(!context->get(context_entry)->IsTheHole());
context->set(context_entry, value);
} else {
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- ArgumentsAccessor::SetImpl(arguments, index, value);
+ Object* current = ArgumentsAccessor::GetRaw(arguments, entry - length);
+ if (current->IsAliasedArgumentsEntry()) {
+ AliasedArgumentsEntry* alias = AliasedArgumentsEntry::cast(current);
+ Context* context = Context::cast(parameter_map->get(0));
+ int context_entry = alias->aliased_context_slot();
+ DCHECK(!context->get(context_entry)->IsTheHole());
+ context->set(context_entry, value);
+ } else {
+ ArgumentsAccessor::SetImpl(arguments, entry - length, value);
+ }
}
}
@@ -1548,9 +1577,8 @@ class SloppyArgumentsElementsAccessor
if (entry < length) {
return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
}
- entry -= length;
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- return ArgumentsAccessor::GetDetailsImpl(arguments, entry);
+ return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length);
}
static Object* GetParameterMapArg(FixedArray* parameter_map, uint32_t index) {
@@ -1612,9 +1640,10 @@ class SlowSloppyArgumentsElementsAccessor
: JSObject::NormalizeElements(object);
PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
Handle<SeededNumberDictionary> new_dictionary =
- SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
- details);
- if (attributes != NONE) new_dictionary->set_requires_slow_elements();
+ SeededNumberDictionary::AddNumberEntry(
+ dictionary, index, value, details,
+ object->map()->is_prototype_map());
+ if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
if (*dictionary != *new_dictionary) {
FixedArray::cast(object->elements())->set(1, *new_dictionary);
}
@@ -1645,8 +1674,12 @@ class SlowSloppyArgumentsElementsAccessor
PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
Handle<SeededNumberDictionary> arguments(
SeededNumberDictionary::cast(parameter_map->get(1)));
- arguments = SeededNumberDictionary::AddNumberEntry(arguments, entry,
- value, details);
+ arguments = SeededNumberDictionary::AddNumberEntry(
+ arguments, entry, value, details, object->map()->is_prototype_map());
+ // If the attributes were NONE, we would have called set rather than
+ // reconfigure.
+ DCHECK_NE(NONE, attributes);
+ object->RequireSlowElements(*arguments);
parameter_map->set(1, *arguments);
} else {
Handle<FixedArrayBase> arguments(
@@ -1686,7 +1719,13 @@ class FastSloppyArgumentsElementsAccessor
static_cast<uint32_t>(old_elements->length()) < new_capacity) {
GrowCapacityAndConvertImpl(object, new_capacity);
}
- SetImpl(object->elements(), index, *value);
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ // For fast holey objects, the entry equals the index. The code above made
+ // sure that there's enough space to store the value. We cannot convert
+ // index to entry explicitly since the slot still contains the hole, so the
+ // current EntryForIndex would indicate that it is "absent" by returning
+ // kMaxUInt32.
+ FastHoleyObjectElementsAccessor::SetImpl(arguments, index, *value);
}
static void ReconfigureImpl(Handle<JSObject> object,
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 9005096a1e..0131f0baf0 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -38,17 +38,8 @@ class ElementsAccessor {
return HasElement(holder, index, handle(holder->elements()));
}
- // Returns the element with the specified index or undefined if there is no
- // such element. This method doesn't iterate up the prototype chain. The
- // caller can optionally pass in the backing store to use for the check, which
- // must be compatible with the ElementsKind of the ElementsAccessor. If
- // backing_store is NULL, the holder->elements() is used as the backing store.
- virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t index,
- Handle<FixedArrayBase> backing_store) = 0;
-
- inline Handle<Object> Get(Handle<JSObject> holder, uint32_t index) {
- return Get(holder, index, handle(holder->elements()));
- }
+ virtual Handle<Object> Get(Handle<FixedArrayBase> backing_store,
+ uint32_t entry) = 0;
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
@@ -69,6 +60,9 @@ class ElementsAccessor {
// destination array with the hole.
static const int kCopyToEndAndInitializeToHole = -2;
+ static const int kDirectionForward = 1;
+ static const int kDirectionReverse = -1;
+
// Copy elements from one backing store to another. Typically, callers specify
// the source JSObject or JSArray in source_holder. If the holder's backing
// store is available, it can be passed in source and source_holder is
@@ -119,16 +113,24 @@ class ElementsAccessor {
static void InitializeOncePerProcess();
static void TearDown();
- virtual void Set(FixedArrayBase* backing_store, uint32_t index,
+ virtual void Set(FixedArrayBase* backing_store, uint32_t entry,
Object* value) = 0;
+
virtual void Reconfigure(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store, uint32_t entry,
Handle<Object> value,
PropertyAttributes attributes) = 0;
- virtual void Add(Handle<JSObject> object, uint32_t entry,
+
+ virtual void Add(Handle<JSObject> object, uint32_t index,
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) = 0;
+ // TODO(cbruni): Consider passing Arguments* instead of Object** depending on
+ // the requirements of future callers.
+ virtual uint32_t Push(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store, Object** objects,
+ uint32_t start, int direction) = 0;
+
protected:
friend class LookupIterator;
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index fc0e50e553..c2033777f2 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -341,14 +341,6 @@ MaybeHandle<Object> Execution::TryGetConstructorDelegate(
}
-void StackGuard::EnableInterrupts() {
- ExecutionAccess access(isolate_);
- if (has_pending_interrupts(access)) {
- set_interrupt_limits(access);
- }
-}
-
-
void StackGuard::SetStackLimit(uintptr_t limit) {
ExecutionAccess access(isolate_);
// If the current limits are special (e.g. due to a pending interrupt) then
@@ -365,6 +357,27 @@ void StackGuard::SetStackLimit(uintptr_t limit) {
}
+void StackGuard::AdjustStackLimitForSimulator() {
+ ExecutionAccess access(isolate_);
+ uintptr_t climit = thread_local_.real_climit_;
+ // If the current limits are special (e.g. due to a pending interrupt) then
+ // leave them alone.
+ uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
+ if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
+ thread_local_.set_jslimit(jslimit);
+ isolate_->heap()->SetStackLimits();
+ }
+}
+
+
+void StackGuard::EnableInterrupts() {
+ ExecutionAccess access(isolate_);
+ if (has_pending_interrupts(access)) {
+ set_interrupt_limits(access);
+ }
+}
+
+
void StackGuard::DisableInterrupts() {
ExecutionAccess access(isolate_);
reset_limits(access);
@@ -520,13 +533,11 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
// --- C a l l s t o n a t i v e s ---
-#define RETURN_NATIVE_CALL(name, args) \
- do { \
- Handle<Object> argv[] = args; \
- return Call(isolate, \
- isolate->name##_fun(), \
- isolate->js_builtins_object(), \
- arraysize(argv), argv); \
+#define RETURN_NATIVE_CALL(name, args) \
+ do { \
+ Handle<Object> argv[] = args; \
+ return Call(isolate, isolate->name##_fun(), \
+ isolate->factory()->undefined_value(), arraysize(argv), argv); \
} while (false)
@@ -548,31 +559,12 @@ MaybeHandle<Object> Execution::ToDetailString(
}
-MaybeHandle<Object> Execution::ToObject(
- Isolate* isolate, Handle<Object> obj) {
- if (obj->IsSpecObject()) return obj;
- RETURN_NATIVE_CALL(to_object, { obj });
-}
-
-
MaybeHandle<Object> Execution::ToInteger(
Isolate* isolate, Handle<Object> obj) {
RETURN_NATIVE_CALL(to_integer, { obj });
}
-MaybeHandle<Object> Execution::ToUint32(
- Isolate* isolate, Handle<Object> obj) {
- RETURN_NATIVE_CALL(to_uint32, { obj });
-}
-
-
-MaybeHandle<Object> Execution::ToInt32(
- Isolate* isolate, Handle<Object> obj) {
- RETURN_NATIVE_CALL(to_int32, { obj });
-}
-
-
MaybeHandle<Object> Execution::ToLength(
Isolate* isolate, Handle<Object> obj) {
RETURN_NATIVE_CALL(to_length, { obj });
@@ -588,6 +580,30 @@ MaybeHandle<Object> Execution::NewDate(Isolate* isolate, double time) {
#undef RETURN_NATIVE_CALL
+MaybeHandle<Object> Execution::ToInt32(Isolate* isolate, Handle<Object> obj) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, Execution::ToNumber(isolate, obj),
+ Object);
+ return isolate->factory()->NewNumberFromInt(DoubleToInt32(obj->Number()));
+}
+
+
+MaybeHandle<Object> Execution::ToObject(Isolate* isolate, Handle<Object> obj) {
+ Handle<JSReceiver> receiver;
+ if (JSReceiver::ToObject(isolate, obj).ToHandle(&receiver)) {
+ return receiver;
+ }
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject), Object);
+}
+
+
+MaybeHandle<Object> Execution::ToUint32(Isolate* isolate, Handle<Object> obj) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, Execution::ToNumber(isolate, obj),
+ Object);
+ return isolate->factory()->NewNumberFromUint(DoubleToUint32(obj->Number()));
+}
+
+
MaybeHandle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
Handle<String> flags) {
Isolate* isolate = pattern->GetIsolate();
@@ -610,9 +626,7 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Handle<Object> args[] = { recv, fun, pos, is_global };
MaybeHandle<Object> maybe_result =
TryCall(isolate->get_stack_trace_line_fun(),
- isolate->js_builtins_object(),
- arraysize(args),
- args);
+ isolate->factory()->undefined_value(), arraysize(args), args);
Handle<Object> result;
if (!maybe_result.ToHandle(&result) || !result->IsString()) {
return isolate->factory()->empty_string();
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index fd7636db96..d783e5c28b 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -5,11 +5,17 @@
#ifndef V8_EXECUTION_H_
#define V8_EXECUTION_H_
+#include "src/allocation.h"
+#include "src/base/atomicops.h"
#include "src/handles.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
+// Forward declarations.
+class JSRegExp;
+
class Execution final : public AllStatic {
public:
// Call a function, the caller supplies a receiver and an array
@@ -130,6 +136,11 @@ class StackGuard final {
// is assumed to grow downwards.
void SetStackLimit(uintptr_t limit);
+ // The simulator uses a separate JS stack. Limits on the JS stack might have
+ // to be adjusted in order to reflect overflows of the C stack, because we
+ // cannot rely on the interleaving of frames on the simulator.
+ void AdjustStackLimitForSimulator();
+
// Threading support.
char* ArchiveStackGuard(char* to);
char* RestoreStackGuard(char* from);
diff --git a/deps/v8/src/expression-classifier.h b/deps/v8/src/expression-classifier.h
index 6edb99e838..17a377890a 100644
--- a/deps/v8/src/expression-classifier.h
+++ b/deps/v8/src/expression-classifier.h
@@ -5,8 +5,6 @@
#ifndef V8_EXPRESSION_CLASSIFIER_H
#define V8_EXPRESSION_CLASSIFIER_H
-#include "src/v8.h"
-
#include "src/messages.h"
#include "src/scanner.h"
#include "src/token.h"
@@ -157,8 +155,7 @@ class ExpressionClassifier {
if (!is_valid_formal_parameter_list_without_duplicates()) return;
invalid_productions_ |= DistinctFormalParametersProduction;
duplicate_formal_parameter_error_.location = loc;
- duplicate_formal_parameter_error_.message =
- MessageTemplate::kStrictParamDupe;
+ duplicate_formal_parameter_error_.message = MessageTemplate::kParamDupe;
duplicate_formal_parameter_error_.arg = nullptr;
}
@@ -226,18 +223,6 @@ class ExpressionClassifier {
}
}
- void AccumulateReclassifyingAsPattern(const ExpressionClassifier& inner) {
- Accumulate(inner, AllProductions & ~PatternProductions);
- if (!inner.is_valid_expression()) {
- if (is_valid_binding_pattern()) {
- binding_pattern_error_ = inner.expression_error();
- }
- if (is_valid_assignment_pattern()) {
- assignment_pattern_error_ = inner.expression_error();
- }
- }
- }
-
private:
unsigned invalid_productions_;
Error expression_error_;
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index aedfb5e93f..3eaa70e515 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -4,6 +4,10 @@
#include "src/extensions/externalize-string-extension.h"
+#include "src/api.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/externalize-string-extension.h b/deps/v8/src/extensions/externalize-string-extension.h
index dc23ffd2d3..009e818497 100644
--- a/deps/v8/src/extensions/externalize-string-extension.h
+++ b/deps/v8/src/extensions/externalize-string-extension.h
@@ -5,7 +5,7 @@
#ifndef V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
#define V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
-#include "src/v8.h"
+#include "include/v8.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/free-buffer-extension.h b/deps/v8/src/extensions/free-buffer-extension.h
index d62ed02b86..bb1418c4a3 100644
--- a/deps/v8/src/extensions/free-buffer-extension.h
+++ b/deps/v8/src/extensions/free-buffer-extension.h
@@ -5,7 +5,7 @@
#ifndef V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
#define V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
-#include "src/v8.h"
+#include "include/v8.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/gc-extension.h b/deps/v8/src/extensions/gc-extension.h
index 2462bd9604..91433911c2 100644
--- a/deps/v8/src/extensions/gc-extension.h
+++ b/deps/v8/src/extensions/gc-extension.h
@@ -5,7 +5,8 @@
#ifndef V8_EXTENSIONS_GC_EXTENSION_H_
#define V8_EXTENSIONS_GC_EXTENSION_H_
-#include "src/v8.h"
+#include "include/v8.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index eb0fed80d5..495167695a 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -4,6 +4,8 @@
#include "src/extensions/statistics-extension.h"
+#include "src/v8.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/statistics-extension.h b/deps/v8/src/extensions/statistics-extension.h
index 8149e44afe..5dac4097b6 100644
--- a/deps/v8/src/extensions/statistics-extension.h
+++ b/deps/v8/src/extensions/statistics-extension.h
@@ -5,7 +5,7 @@
#ifndef V8_EXTENSIONS_STATISTICS_EXTENSION_H_
#define V8_EXTENSIONS_STATISTICS_EXTENSION_H_
-#include "src/v8.h"
+#include "include/v8.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/trigger-failure-extension.h b/deps/v8/src/extensions/trigger-failure-extension.h
index c01b37d3e9..4b10bdc886 100644
--- a/deps/v8/src/extensions/trigger-failure-extension.h
+++ b/deps/v8/src/extensions/trigger-failure-extension.h
@@ -5,7 +5,7 @@
#ifndef V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
#define V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
-#include "src/v8.h"
+#include "include/v8.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index f0967c7200..db11ebb390 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -62,12 +62,11 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
}
-Handle<Oddball> Factory::NewOddball(Handle<Map> map,
- const char* to_string,
+Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number,
- byte kind) {
+ const char* type_of, byte kind) {
Handle<Oddball> oddball = New<Oddball>(map, OLD_SPACE);
- Oddball::Initialize(isolate(), oddball, to_string, to_number, kind);
+ Oddball::Initialize(isolate(), oddball, to_string, to_number, type_of, kind);
return oddball;
}
@@ -874,18 +873,24 @@ Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
}
-Handle<ExternalArray> Factory::NewExternalArray(int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure) {
+Handle<BytecodeArray> Factory::NewBytecodeArray(int length,
+ const byte* raw_bytecodes,
+ int frame_size) {
+ DCHECK(0 <= length);
+ CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateBytecodeArray(
+ length, raw_bytecodes, frame_size),
+ BytecodeArray);
+}
+
+
+Handle<FixedTypedArrayBase> Factory::NewFixedTypedArrayWithExternalPointer(
+ int length, ExternalArrayType array_type, void* external_pointer,
+ PretenureFlag pretenure) {
DCHECK(0 <= length && length <= Smi::kMaxValue);
CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateExternalArray(length,
- array_type,
- external_pointer,
- pretenure),
- ExternalArray);
+ isolate(), isolate()->heap()->AllocateFixedTypedArrayWithExternalPointer(
+ length, array_type, external_pointer, pretenure),
+ FixedTypedArrayBase);
}
@@ -973,6 +978,15 @@ Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array,
}
+Handle<FixedArray> Factory::CopyFixedArrayAndGrow(Handle<FixedArray> array,
+ int grow_by,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->CopyFixedArrayAndGrow(
+ *array, grow_by, pretenure),
+ FixedArray);
+}
+
+
Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->CopyFixedArray(*array),
@@ -1042,43 +1056,41 @@ Handle<HeapNumber> Factory::NewHeapNumber(double value,
}
-Handle<Float32x4> Factory::NewFloat32x4(float w, float x, float y, float z,
- PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(), isolate()->heap()->AllocateFloat32x4(w, x, y, z, pretenure),
- Float32x4);
-}
+#define SIMD128_NEW_DEF(TYPE, Type, type, lane_count, lane_type) \
+ Handle<Type> Factory::New##Type(lane_type lanes[lane_count], \
+ PretenureFlag pretenure) { \
+ CALL_HEAP_FUNCTION( \
+ isolate(), isolate()->heap()->Allocate##Type(lanes, pretenure), Type); \
+ }
+SIMD128_TYPES(SIMD128_NEW_DEF)
+#undef SIMD128_NEW_DEF
-Handle<Object> Factory::NewError(const char* maker,
+Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
MessageTemplate::Template template_index,
Handle<Object> arg0, Handle<Object> arg1,
Handle<Object> arg2) {
HandleScope scope(isolate());
- Handle<String> error_maker = InternalizeUtf8String(maker);
if (isolate()->bootstrapper()->IsActive()) {
- // If this exception is being thrown during bootstrapping,
- // js_builtins_object is unavailable. We return the error maker
- // name's string as the exception since we have nothing better
- // to do.
- return scope.CloseAndEscape(error_maker);
+ // During bootstrapping we cannot construct error objects.
+ return scope.CloseAndEscape(NewStringFromAsciiChecked(
+ MessageTemplate::TemplateString(template_index)));
}
- Handle<Object> fun_obj = Object::GetProperty(isolate()->js_builtins_object(),
- error_maker).ToHandleChecked();
- Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
+ Handle<JSFunction> fun = isolate()->make_error_function();
Handle<Object> message_type(Smi::FromInt(template_index), isolate());
if (arg0.is_null()) arg0 = undefined_value();
if (arg1.is_null()) arg1 = undefined_value();
if (arg2.is_null()) arg2 = undefined_value();
- Handle<Object> argv[] = {message_type, arg0, arg1, arg2};
+ Handle<Object> argv[] = {constructor, message_type, arg0, arg1, arg2};
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
Handle<Object> result;
MaybeHandle<Object> exception;
- if (!Execution::TryCall(fun, isolate()->js_builtins_object(), arraysize(argv),
- argv, &exception).ToHandle(&result)) {
+ if (!Execution::TryCall(fun, undefined_value(), arraysize(argv), argv,
+ &exception)
+ .ToHandle(&result)) {
Handle<Object> exception_obj;
if (exception.ToHandle(&exception_obj)) {
result = exception_obj;
@@ -1090,131 +1102,17 @@ Handle<Object> Factory::NewError(const char* maker,
}
-Handle<Object> Factory::NewError(MessageTemplate::Template template_index,
- Handle<Object> arg0, Handle<Object> arg1,
- Handle<Object> arg2) {
- return NewError("MakeError", template_index, arg0, arg1, arg2);
-}
-
-
-Handle<Object> Factory::NewTypeError(MessageTemplate::Template template_index,
- Handle<Object> arg0, Handle<Object> arg1,
- Handle<Object> arg2) {
- return NewError("MakeTypeError", template_index, arg0, arg1, arg2);
-}
-
-
-Handle<Object> Factory::NewSyntaxError(MessageTemplate::Template template_index,
- Handle<Object> arg0, Handle<Object> arg1,
- Handle<Object> arg2) {
- return NewError("MakeSyntaxError", template_index, arg0, arg1, arg2);
-}
-
-
-Handle<Object> Factory::NewReferenceError(
- MessageTemplate::Template template_index, Handle<Object> arg0,
- Handle<Object> arg1, Handle<Object> arg2) {
- return NewError("MakeReferenceError", template_index, arg0, arg1, arg2);
-}
-
-
-Handle<Object> Factory::NewRangeError(MessageTemplate::Template template_index,
- Handle<Object> arg0, Handle<Object> arg1,
- Handle<Object> arg2) {
- return NewError("MakeRangeError", template_index, arg0, arg1, arg2);
-}
-
-
-Handle<Object> Factory::NewEvalError(MessageTemplate::Template template_index,
- Handle<Object> arg0, Handle<Object> arg1,
- Handle<Object> arg2) {
- return NewError("MakeEvalError", template_index, arg0, arg1, arg2);
-}
-
-
-Handle<String> Factory::EmergencyNewError(const char* message,
- Handle<JSArray> args) {
- const int kBufferSize = 1000;
- char buffer[kBufferSize];
- size_t space = kBufferSize;
- char* p = &buffer[0];
-
- Vector<char> v(buffer, kBufferSize);
- StrNCpy(v, message, space);
- space -= Min(space, strlen(message));
- p = &buffer[kBufferSize] - space;
-
- for (int i = 0; i < Smi::cast(args->length())->value(); i++) {
- if (space > 0) {
- *p++ = ' ';
- space--;
- if (space > 0) {
- Handle<String> arg_str = Handle<String>::cast(
- Object::GetElement(isolate(), args, i).ToHandleChecked());
- SmartArrayPointer<char> arg = arg_str->ToCString();
- Vector<char> v2(p, static_cast<int>(space));
- StrNCpy(v2, arg.get(), space);
- space -= Min(space, strlen(arg.get()));
- p = &buffer[kBufferSize] - space;
- }
- }
- }
- if (space > 0) {
- *p = '\0';
- } else {
- buffer[kBufferSize - 1] = '\0';
- }
- return NewStringFromUtf8(CStrVector(buffer), TENURED).ToHandleChecked();
-}
-
-
-Handle<Object> Factory::NewError(const char* maker, const char* message,
- Handle<JSArray> args) {
- Handle<String> make_str = InternalizeUtf8String(maker);
- Handle<Object> fun_obj = Object::GetProperty(
- isolate()->js_builtins_object(), make_str).ToHandleChecked();
- // If the builtins haven't been properly configured yet this error
- // constructor may not have been defined. Bail out.
- if (!fun_obj->IsJSFunction()) {
- return EmergencyNewError(message, args);
- }
- Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
- Handle<Object> message_obj = InternalizeUtf8String(message);
- Handle<Object> argv[] = { message_obj, args };
-
- // Invoke the JavaScript factory method. If an exception is thrown while
- // running the factory method, use the exception as the result.
- Handle<Object> result;
- MaybeHandle<Object> exception;
- if (!Execution::TryCall(fun,
- isolate()->js_builtins_object(),
- arraysize(argv),
- argv,
- &exception).ToHandle(&result)) {
- Handle<Object> exception_obj;
- if (exception.ToHandle(&exception_obj)) return exception_obj;
- return undefined_value();
- }
- return result;
-}
-
-
-Handle<Object> Factory::NewError(const char* constructor,
+Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
Handle<String> message) {
- Handle<String> constr = InternalizeUtf8String(constructor);
- Handle<JSFunction> fun = Handle<JSFunction>::cast(Object::GetProperty(
- isolate()->js_builtins_object(), constr).ToHandleChecked());
Handle<Object> argv[] = { message };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
Handle<Object> result;
MaybeHandle<Object> exception;
- if (!Execution::TryCall(fun,
- isolate()->js_builtins_object(),
- arraysize(argv),
- argv,
- &exception).ToHandle(&result)) {
+ if (!Execution::TryCall(constructor, undefined_value(), arraysize(argv), argv,
+ &exception)
+ .ToHandle(&result)) {
Handle<Object> exception_obj;
if (exception.ToHandle(&exception_obj)) return exception_obj;
return undefined_value();
@@ -1548,7 +1446,7 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
// Make sure we don't have a ton of pre-allocated slots in the
// global objects. They will be unused once we normalize the object.
DCHECK(map->unused_property_fields() == 0);
- DCHECK(map->inobject_properties() == 0);
+ DCHECK(map->GetInObjectProperties() == 0);
// Initial size of the backing store to avoid resize of the storage during
// bootstrapping. The size differs between the JS global object ad the
@@ -1597,14 +1495,12 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
Handle<JSObject> Factory::NewJSObjectFromMap(
Handle<Map> map,
PretenureFlag pretenure,
- bool alloc_props,
Handle<AllocationSite> allocation_site) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSObjectFromMap(
*map,
pretenure,
- alloc_props,
allocation_site.is_null() ? NULL : *allocation_site),
JSObject);
}
@@ -1758,11 +1654,11 @@ ElementsKind GetExternalArrayElementsKind(ExternalArrayType type) {
switch (type) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return EXTERNAL_##TYPE##_ELEMENTS;
+ return TYPE##_ELEMENTS;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
}
UNREACHABLE();
- return FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND;
+ return FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND;
#undef TYPED_ARRAY_CASE
}
@@ -1904,7 +1800,7 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<Object> length_object = NewNumberFromSize(length);
obj->set_length(*length_object);
- Handle<ExternalArray> elements = NewExternalArray(
+ Handle<FixedTypedArrayBase> elements = NewFixedTypedArrayWithExternalPointer(
static_cast<int>(length), type,
static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
Handle<Map> map = JSObject::GetElementsTransitionMap(obj, elements_kind);
@@ -2003,8 +1899,7 @@ void Factory::ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type,
DCHECK(size_difference >= 0);
// Allocate the backing storage for the properties.
- int prop_size = map->InitialPropertiesLength();
- Handle<FixedArray> properties = NewFixedArray(prop_size, TENURED);
+ Handle<FixedArray> properties = empty_fixed_array();
Heap* heap = isolate()->heap();
MaybeHandle<SharedFunctionInfo> shared;
@@ -2023,7 +1918,7 @@ void Factory::ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type,
if (size_difference > 0) {
Address address = proxy->address();
heap->CreateFillerObjectAt(address + map->instance_size(), size_difference);
- heap->AdjustLiveBytes(address, -size_difference,
+ heap->AdjustLiveBytes(*proxy, -size_difference,
Heap::CONCURRENT_TO_SWEEPER);
}
@@ -2057,9 +1952,9 @@ Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy() {
Handle<Map> map = NewMap(JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize);
// Maintain invariant expected from any JSGlobalProxy.
map->set_is_access_check_needed(true);
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateJSObjectFromMap(
- *map, NOT_TENURED, false),
- JSGlobalProxy);
+ CALL_HEAP_FUNCTION(
+ isolate(), isolate()->heap()->AllocateJSObjectFromMap(*map, NOT_TENURED),
+ JSGlobalProxy);
}
@@ -2077,8 +1972,7 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
DCHECK(map->instance_type() == object->map()->instance_type());
// Allocate the backing storage for the properties.
- int prop_size = map->InitialPropertiesLength();
- Handle<FixedArray> properties = NewFixedArray(prop_size, TENURED);
+ Handle<FixedArray> properties = empty_fixed_array();
// In order to keep heap in consistent state there must be no allocations
// before object re-initialization is finished.
@@ -2274,13 +2168,6 @@ Handle<String> Factory::NumberToString(Handle<Object> number,
Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
- // Get the original code of the function.
- Handle<Code> code(shared->code());
-
- // Create a copy of the code before allocating the debug info object to avoid
- // allocation while setting up the debug info object.
- Handle<Code> original_code(*Factory::CopyCode(code));
-
// Allocate initial fixed array for active break points before allocating the
// debug info object to avoid allocation while setting up the debug info
// object.
@@ -2293,8 +2180,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
Handle<DebugInfo> debug_info =
Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE));
debug_info->set_shared(*shared);
- debug_info->set_original_code(*original_code);
- debug_info->set_code(*code);
+ debug_info->set_code(shared->code());
debug_info->set_break_points(*break_points);
// Link debug info to function.
@@ -2307,10 +2193,9 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
int length) {
bool strict_mode_callee = is_strict(callee->shared()->language_mode()) ||
- !callee->is_simple_parameter_list();
+ !callee->has_simple_parameters();
Handle<Map> map = strict_mode_callee ? isolate()->strict_arguments_map()
: isolate()->sloppy_arguments_map();
-
AllocationSiteUsageContext context(isolate(), Handle<AllocationSite>(),
false);
DCHECK(!isolate()->has_pending_exception());
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 2de768bf13..2c3d687786 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -16,9 +16,8 @@ class FeedbackVectorSpec;
// Interface for handle based allocation.
class Factory final {
public:
- Handle<Oddball> NewOddball(Handle<Map> map,
- const char* to_string,
- Handle<Object> to_number,
+ Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
+ Handle<Object> to_number, const char* type_of,
byte kind);
// Allocates a fixed array initialized with undefined values.
@@ -283,10 +282,11 @@ class Factory final {
Handle<ByteArray> NewByteArray(int length,
PretenureFlag pretenure = NOT_TENURED);
- Handle<ExternalArray> NewExternalArray(
- int length,
- ExternalArrayType array_type,
- void* external_pointer,
+ Handle<BytecodeArray> NewBytecodeArray(int length, const byte* raw_bytecodes,
+ int frame_size);
+
+ Handle<FixedTypedArrayBase> NewFixedTypedArrayWithExternalPointer(
+ int length, ExternalArrayType array_type, void* external_pointer,
PretenureFlag pretenure = NOT_TENURED);
Handle<FixedTypedArrayBase> NewFixedTypedArray(
@@ -321,6 +321,10 @@ class Factory final {
Handle<FixedArray> CopyFixedArrayWithMap(Handle<FixedArray> array,
Handle<Map> map);
+ Handle<FixedArray> CopyFixedArrayAndGrow(
+ Handle<FixedArray> array, int grow_by,
+ PretenureFlag pretenure = NOT_TENURED);
+
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
// This method expects a COW array in new space, and creates a copy
@@ -350,8 +354,12 @@ class Factory final {
Handle<HeapNumber> NewHeapNumber(double value,
MutableMode mode = IMMUTABLE,
PretenureFlag pretenure = NOT_TENURED);
- Handle<Float32x4> NewFloat32x4(float w, float x, float y, float z,
- PretenureFlag pretenure = NOT_TENURED);
+
+#define SIMD128_NEW_DECL(TYPE, Type, type, lane_count, lane_type) \
+ Handle<Type> New##Type(lane_type lanes[lane_count], \
+ PretenureFlag pretenure = NOT_TENURED);
+ SIMD128_TYPES(SIMD128_NEW_DECL)
+#undef SIMD128_NEW_DECL
// These objects are used by the api to create env-independent data
// structures in the heap.
@@ -379,7 +387,6 @@ class Factory final {
Handle<JSObject> NewJSObjectFromMap(
Handle<Map> map,
PretenureFlag pretenure = NOT_TENURED,
- bool allocate_properties = true,
Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
// JS modules are pretenured.
@@ -531,52 +538,35 @@ class Factory final {
Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
// Interface for creating error objects.
-
- Handle<Object> NewError(const char* maker, const char* message,
- Handle<JSArray> args);
- Handle<String> EmergencyNewError(const char* message, Handle<JSArray> args);
-
- Handle<Object> NewError(const char* constructor, Handle<String> message);
+ Handle<Object> NewError(Handle<JSFunction> constructor,
+ Handle<String> message);
Handle<Object> NewInvalidStringLengthError() {
return NewRangeError(MessageTemplate::kInvalidStringLength);
}
- Handle<Object> NewError(const char* maker,
+ Handle<Object> NewError(Handle<JSFunction> constructor,
MessageTemplate::Template template_index,
Handle<Object> arg0 = Handle<Object>(),
Handle<Object> arg1 = Handle<Object>(),
Handle<Object> arg2 = Handle<Object>());
- Handle<Object> NewError(MessageTemplate::Template template_index,
- Handle<Object> arg0 = Handle<Object>(),
- Handle<Object> arg1 = Handle<Object>(),
- Handle<Object> arg2 = Handle<Object>());
+#define DEFINE_ERROR(NAME, name) \
+ Handle<Object> New##NAME(MessageTemplate::Template template_index, \
+ Handle<Object> arg0 = Handle<Object>(), \
+ Handle<Object> arg1 = Handle<Object>(), \
+ Handle<Object> arg2 = Handle<Object>()) { \
+ return NewError(isolate()->name##_function(), template_index, arg0, arg1, \
+ arg2); \
+ }
- Handle<Object> NewTypeError(MessageTemplate::Template template_index,
- Handle<Object> arg0 = Handle<Object>(),
- Handle<Object> arg1 = Handle<Object>(),
- Handle<Object> arg2 = Handle<Object>());
-
- Handle<Object> NewSyntaxError(MessageTemplate::Template template_index,
- Handle<Object> arg0 = Handle<Object>(),
- Handle<Object> arg1 = Handle<Object>(),
- Handle<Object> arg2 = Handle<Object>());
-
- Handle<Object> NewReferenceError(MessageTemplate::Template template_index,
- Handle<Object> arg0 = Handle<Object>(),
- Handle<Object> arg1 = Handle<Object>(),
- Handle<Object> arg2 = Handle<Object>());
-
- Handle<Object> NewRangeError(MessageTemplate::Template template_index,
- Handle<Object> arg0 = Handle<Object>(),
- Handle<Object> arg1 = Handle<Object>(),
- Handle<Object> arg2 = Handle<Object>());
-
- Handle<Object> NewEvalError(MessageTemplate::Template template_index,
- Handle<Object> arg0 = Handle<Object>(),
- Handle<Object> arg1 = Handle<Object>(),
- Handle<Object> arg2 = Handle<Object>());
+ DEFINE_ERROR(Error, error)
+ DEFINE_ERROR(EvalError, eval_error)
+ DEFINE_ERROR(RangeError, range_error)
+ DEFINE_ERROR(ReferenceError, reference_error)
+ DEFINE_ERROR(SyntaxError, syntax_error)
+ DEFINE_ERROR(TypeError, type_error)
+#undef DEFINE_ERROR
Handle<String> NumberToString(Handle<Object> number,
bool check_number_string_cache = true);
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/field-index-inl.h
index c151ab1072..042e4fbdd2 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/field-index-inl.h
@@ -16,7 +16,7 @@ inline FieldIndex FieldIndex::ForInObjectOffset(int offset, Map* map) {
int index = offset / kPointerSize;
DCHECK(map == NULL ||
index < (map->GetInObjectPropertyOffset(0) / kPointerSize +
- map->inobject_properties()));
+ map->GetInObjectProperties()));
return FieldIndex(true, index, false, 0, 0, true);
}
@@ -25,7 +25,7 @@ inline FieldIndex FieldIndex::ForPropertyIndex(Map* map,
int property_index,
bool is_double) {
DCHECK(map->instance_type() >= FIRST_NONSTRING_TYPE);
- int inobject_properties = map->inobject_properties();
+ int inobject_properties = map->GetInObjectProperties();
bool is_inobject = property_index < inobject_properties;
int first_inobject_offset;
if (is_inobject) {
@@ -58,7 +58,7 @@ inline FieldIndex FieldIndex::ForLoadByFieldIndex(Map* map, int orig_index) {
field_index += JSObject::kHeaderSize / kPointerSize;
}
FieldIndex result(is_inobject, field_index, is_double,
- map->inobject_properties(), first_inobject_offset);
+ map->GetInObjectProperties(), first_inobject_offset);
DCHECK(result.GetLoadByFieldIndex() == orig_index);
return result;
}
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 79611270dd..2b12e65789 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -185,32 +185,35 @@ DEFINE_IMPLICATION(es_staging, harmony)
DEFINE_BOOL(legacy_const, true, "legacy semantics for const in sloppy mode")
// Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS(V) \
- V(harmony_modules, "harmony modules") \
- V(harmony_array_includes, "harmony Array.prototype.includes") \
- V(harmony_regexps, "harmony regular expression extensions") \
- V(harmony_proxies, "harmony proxies") \
- V(harmony_sloppy, "harmony features in sloppy mode") \
- V(harmony_unicode_regexps, "harmony unicode regexps") \
- V(harmony_reflect, "harmony Reflect API") \
- V(harmony_destructuring, "harmony destructuring") \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_atomics, "harmony atomics") \
- V(harmony_new_target, "harmony new.target")
+#define HARMONY_INPROGRESS(V) \
+ V(harmony_modules, "harmony modules") \
+ V(harmony_regexps, "harmony regular expression extensions") \
+ V(harmony_proxies, "harmony proxies") \
+ V(harmony_sloppy_function, "harmony sloppy function block scoping") \
+ V(harmony_sloppy_let, "harmony let in sloppy mode") \
+ V(harmony_unicode_regexps, "harmony unicode regexps") \
+ V(harmony_reflect, "harmony Reflect API") \
+ V(harmony_destructuring, "harmony destructuring") \
+ V(harmony_default_parameters, "harmony default parameters") \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_atomics, "harmony atomics") \
+ V(harmony_simd, "harmony simd")
// Features that are complete (but still behind --harmony/es-staging flag).
#define HARMONY_STAGED(V) \
+ V(harmony_array_includes, "harmony Array.prototype.includes") \
V(harmony_tostring, "harmony toString") \
V(harmony_concat_spreadable, "harmony isConcatSpreadable") \
V(harmony_rest_parameters, "harmony rest parameters") \
- V(harmony_spreadcalls, "harmony spread-calls") \
- V(harmony_spread_arrays, "harmony spread in array literals")
+ V(harmony_sloppy, "harmony features in sloppy mode")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) \
- V(harmony_arrow_functions, "harmony arrow functions") \
- V(harmony_computed_property_names, "harmony computed property names") \
- V(harmony_unicode, "harmony unicode escapes") \
+#define HARMONY_SHIPPING(V) \
+ V(harmony_arrow_functions, "harmony arrow functions") \
+ V(harmony_new_target, "harmony new.target") \
+ V(harmony_object_observe, "harmony Object.observe") \
+ V(harmony_spreadcalls, "harmony spread-calls") \
+ V(harmony_spread_arrays, "harmony spread in array literals") \
V(harmony_object, "harmony Object methods")
// Once a shipping feature has proved stable in the wild, it will be dropped
@@ -238,7 +241,7 @@ HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
// Feature dependencies.
-DEFINE_IMPLICATION(harmony_unicode_regexps, harmony_unicode)
+DEFINE_IMPLICATION(harmony_sloppy_let, harmony_sloppy)
// Flags for experimental implementation features.
@@ -268,7 +271,7 @@ DEFINE_BOOL(smi_binop, true, "support smi representation in binary operations")
// Flags for optimization types.
DEFINE_BOOL(optimize_for_size, false,
"Enables optimizations which favor memory size over execution "
- "speed.")
+ "speed")
DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
@@ -276,6 +279,14 @@ DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_BOOL(string_slices, true, "use string slices")
+// Flags for Ignition.
+DEFINE_BOOL(ignition, false, "use ignition interpreter")
+DEFINE_STRING(ignition_filter, "~~", "filter for ignition interpreter")
+DEFINE_BOOL(print_bytecode, false,
+ "print bytecode generated by ignition interpreter")
+DEFINE_BOOL(trace_ignition_codegen, false,
+ "trace the codegen of ignition interpreter bytecode handlers")
+
// Flags for Crankshaft.
DEFINE_BOOL(crankshaft, true, "use crankshaft")
DEFINE_STRING(hydrogen_filter, "*", "optimization filter")
@@ -388,6 +399,9 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
+DEFINE_BOOL(turbo_preprocess_ranges, false,
+ "run pre-register allocation heuristics")
+DEFINE_IMPLICATION(turbo_greedy_regalloc, turbo_preprocess_ranges)
DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
DEFINE_STRING(turbo_filter, "~~", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
@@ -430,6 +444,15 @@ DEFINE_BOOL(turbo_stress_loop_peeling, false,
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
DEFINE_BOOL(turbo_cache_shared_code, true, "cache context-independent code")
+DEFINE_BOOL(turbo_preserve_shared_code, false, "keep context-independent code")
+
+#if defined(V8_WASM)
+// Flags for native WebAssembly.
+DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
+DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
+DEFINE_BOOL(wasm_break_on_decoder_error, false,
+ "debug break when wasm decoder encounters an error")
+#endif
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
@@ -625,7 +648,6 @@ DEFINE_BOOL(age_code, true,
"track un-executed functions to age code and flush only "
"old code (required for code flushing)")
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
-DEFINE_BOOL(incremental_marking_steps, true, "do incremental marking steps")
DEFINE_BOOL(overapproximate_weak_closure, true,
"overapproximate weak closer to reduce atomic pause time")
DEFINE_INT(min_progress_during_object_groups_marking, 128,
@@ -667,6 +689,7 @@ DEFINE_BOOL(use_idle_notification, true,
DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
DEFINE_BOOL(vector_stores, false, "use vectors for store ics")
+DEFINE_BOOL(global_var_shortcuts, false, "use ic-less global loads and stores")
// macro-assembler-ia32.cc
DEFINE_BOOL(native_code_counters, false,
@@ -795,6 +818,10 @@ DEFINE_BOOL(manual_evacuation_candidates_selection, false,
"Test mode only flag. It allows an unit test to select evacuation "
"candidates pages (requires --stress_compaction).")
+// api.cc
+DEFINE_INT(external_allocation_limit_incremental_time, 1,
+ "Time spent in incremental marking steps (in ms) once the external "
+ "allocation limit is reached")
//
// Dev shell flags
@@ -803,8 +830,6 @@ DEFINE_BOOL(manual_evacuation_candidates_selection, false,
DEFINE_BOOL(help, false, "Print usage message, including flags, on console")
DEFINE_BOOL(dump_counters, false, "Dump counters on exit")
-DEFINE_BOOL(debugger, false, "Enable JavaScript debugger")
-
DEFINE_STRING(map_counters, "", "Map counters to a file")
DEFINE_ARGS(js_arguments,
"Pass all remaining arguments to the script. Alias for \"--\".")
@@ -929,6 +954,9 @@ DEFINE_BOOL(ll_prof, false, "Enable low-level linux profiler.")
DEFINE_BOOL(perf_basic_prof, false,
"Enable perf linux profiler (basic support).")
DEFINE_NEG_IMPLICATION(perf_basic_prof, compact_code_space)
+DEFINE_BOOL(perf_basic_prof_only_functions, false,
+ "Only report function code ranges to perf (i.e. no stubs).")
+DEFINE_IMPLICATION(perf_basic_prof_only_functions, perf_basic_prof)
DEFINE_STRING(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
DEFINE_BOOL(log_internal_timer_events, false, "Time internal events.")
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 650d6f9725..e69bcd9033 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -217,6 +217,12 @@ inline JSFunction* JavaScriptFrame::function() const {
}
+inline Object* JavaScriptFrame::function_slot_object() const {
+ const int offset = JavaScriptFrameConstants::kFunctionOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+
inline StubFrame::StubFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {
}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 723db4ae13..8561e557e8 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -6,14 +6,11 @@
#include <sstream>
-#include "src/v8.h"
-
#include "src/ast.h"
#include "src/base/bits.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
-#include "src/full-codegen.h"
-#include "src/heap/mark-compact.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopeinfo.h"
#include "src/string-stream.h"
@@ -738,8 +735,8 @@ Object* JavaScriptFrame::GetOriginalConstructor() const {
}
DCHECK(IsConstructFrame(fp));
STATIC_ASSERT(ConstructFrameConstants::kOriginalConstructorOffset ==
- StandardFrameConstants::kExpressionsOffset - 2 * kPointerSize);
- return GetExpression(fp, 2);
+ StandardFrameConstants::kExpressionsOffset - 3 * kPointerSize);
+ return GetExpression(fp, 3);
}
@@ -819,7 +816,7 @@ void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function, Code* code,
Object* script_name_raw = script->name();
if (script_name_raw->IsString()) {
String* script_name = String::cast(script->name());
- SmartArrayPointer<char> c_script_name =
+ base::SmartArrayPointer<char> c_script_name =
script_name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
PrintF(file, " at %s:%d", c_script_name.get(), line);
} else {
@@ -1082,18 +1079,14 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
}
-Object* OptimizedFrame::StackSlotAt(int index) const {
- // Positive index means the value is spilled to the locals
- // area. Negative means it is stored in the incoming parameter
- // area.
- if (index >= 0) return GetExpression(index);
+int OptimizedFrame::StackSlotOffsetRelativeToFp(int slot_index) {
+ return StandardFrameConstants::kCallerSPOffset -
+ ((slot_index + 1) * kPointerSize);
+}
- // Index -1 overlaps with last parameter, -n with the first parameter,
- // (-n - 1) with the receiver with n being the number of parameters
- // of the outermost, optimized frame.
- int const parameter_count = ComputeParametersCount();
- int const parameter_index = index + parameter_count;
- return (parameter_index == -1) ? receiver() : GetParameter(parameter_index);
+
+Object* OptimizedFrame::StackSlotAt(int index) const {
+ return Memory::Object_at(fp() + StackSlotOffsetRelativeToFp(index));
}
@@ -1446,6 +1439,9 @@ Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
// after the inner pointer.
Page* page = Page::FromAddress(inner_pointer);
+ DCHECK_EQ(page->owner(), heap->code_space());
+ heap->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(page);
+
Address addr = page->skip_list()->StartFor(inner_pointer);
Address top = heap->code_space()->top();
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 910dc18cfb..72250e37a1 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -120,8 +120,13 @@ class StandardFrameConstants : public AllStatic {
static const int kCPSlotSize =
FLAG_enable_embedded_constant_pool ? kPointerSize : 0;
static const int kFixedFrameSizeFromFp = 2 * kPointerSize + kCPSlotSize;
+ static const int kFixedFrameSizeAboveFp = kPCOnStackSize + kFPOnStackSize;
static const int kFixedFrameSize =
- kPCOnStackSize + kFPOnStackSize + kFixedFrameSizeFromFp;
+ kFixedFrameSizeAboveFp + kFixedFrameSizeFromFp;
+ static const int kFixedSlotCountAboveFp =
+ kFixedFrameSizeAboveFp / kPointerSize;
+ static const int kFixedSlotCount = kFixedFrameSize / kPointerSize;
+ static const int kCPSlotCount = kCPSlotSize / kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize - kCPSlotSize;
static const int kMarkerOffset = -2 * kPointerSize - kCPSlotSize;
static const int kContextOffset = -1 * kPointerSize - kCPSlotSize;
@@ -155,16 +160,18 @@ class ConstructFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kImplicitReceiverOffset =
- StandardFrameConstants::kExpressionsOffset - 2 * kPointerSize;
+ StandardFrameConstants::kExpressionsOffset - 4 * kPointerSize;
static const int kOriginalConstructorOffset =
- StandardFrameConstants::kExpressionsOffset - 2 * kPointerSize;
+ StandardFrameConstants::kExpressionsOffset - 3 * kPointerSize;
static const int kLengthOffset =
+ StandardFrameConstants::kExpressionsOffset - 2 * kPointerSize;
+ static const int kAllocationSiteOffset =
StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
static const int kCodeOffset =
StandardFrameConstants::kExpressionsOffset - 0 * kPointerSize;
static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
+ StandardFrameConstants::kFixedFrameSize + 5 * kPointerSize;
};
@@ -676,6 +683,8 @@ class OptimizedFrame : public JavaScriptFrame {
DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
+ static int StackSlotOffsetRelativeToFp(int slot_index);
+
protected:
inline explicit OptimizedFrame(StackFrameIteratorBase* iterator);
diff --git a/deps/v8/src/full-codegen/OWNERS b/deps/v8/src/full-codegen/OWNERS
new file mode 100644
index 0000000000..4464e8fd28
--- /dev/null
+++ b/deps/v8/src/full-codegen/OWNERS
@@ -0,0 +1,8 @@
+set noparent
+
+bmeurer@chromium.org
+jarin@chromium.org
+jkummerow@chromium.org
+mstarzinger@chromium.org
+verwaest@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
index e4b7cf34ee..197a5ecc6f 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -2,16 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/full-codegen.h"
+#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/parser.h"
#include "src/scopes.h"
@@ -114,7 +112,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
@@ -150,7 +148,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
@@ -315,10 +313,10 @@ void FullCodeGenerator::Generate() {
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
+ // The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !is_simple_parameter_list()) {
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
@@ -345,18 +343,14 @@ void FullCodeGenerator::Generate() {
} else {
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- DCHECK(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
- VisitVariableDeclaration(function);
- }
VisitDeclarations(scope()->declarations());
}
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
+
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
@@ -364,7 +358,8 @@ void FullCodeGenerator::Generate() {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
Handle<Code> stack_check = isolate()->builtins()->StackCheck();
- PredictableCodeSizeScope predictable(masm_,
+ PredictableCodeSizeScope predictable(masm_);
+ predictable.ExpectSize(
masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
__ Call(stack_check, RelocInfo::CODE_TARGET);
__ bind(&ok);
@@ -417,10 +412,6 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
Label start;
__ bind(&start);
int reset_value = FLAG_interrupt_budget;
- if (info_->is_debug()) {
- // Detect debug break requests as soon as possible.
- reset_value = FLAG_interrupt_budget >> 4;
- }
__ mov(r2, Operand(profiling_counter_));
// The mov instruction above can be either 1 to 3 (for ARMv7) or 1 to 5
// instructions (for ARMv6) depending upon whether it is an extended constant
@@ -498,11 +489,6 @@ void FullCodeGenerator::EmitReturnSequence() {
EmitProfilingCounterReset();
__ bind(&ok);
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- __ bind(&check_exit_codesize);
-#endif
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
@@ -511,7 +497,6 @@ void FullCodeGenerator::EmitReturnSequence() {
SetReturnPosition(function());
// TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
PredictableCodeSizeScope predictable(masm_, -1);
- __ RecordJSReturn();
int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT);
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
__ add(sp, sp, Operand(sp_delta));
@@ -519,28 +504,10 @@ void FullCodeGenerator::EmitReturnSequence() {
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
-
-#ifdef DEBUG
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- DCHECK(Assembler::kJSReturnSequenceInstructions <=
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
}
}
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
@@ -548,15 +515,6 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
}
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
}
@@ -715,10 +673,6 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
}
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
@@ -888,8 +842,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::LOCAL:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, StackOperand(variable));
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ str(r0, StackOperand(variable));
}
break;
@@ -897,8 +851,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, ContextOperand(cp, variable->index()));
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ str(r0, ContextOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -909,21 +863,20 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(r2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
DCHECK(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ mov(r1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (hole_init) {
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- __ Push(cp, r2, r1, r0);
} else {
__ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value.
- __ Push(cp, r2, r1, r0);
}
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ Push(r2, r0);
+ __ CallRuntime(IsImmutableVariableMode(mode)
+ ? Runtime::kDeclareReadOnlyLookupSlot
+ : Runtime::kDeclareLookupSlot,
+ 2);
break;
}
}
@@ -976,53 +929,22 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ mov(r2, Operand(variable->name()));
- __ mov(r1, Operand(Smi::FromInt(NONE)));
- __ Push(cp, r2, r1);
+ __ Push(r2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
break;
}
}
}
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
- // The context is the first argument.
__ mov(r1, Operand(pairs));
__ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
- __ Push(cp, r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ Push(r1, r0);
+ __ CallRuntime(Runtime::kDeclareGlobals, 2);
// Return value is ignored.
}
@@ -1159,8 +1081,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &done_convert);
__ bind(&convert);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(r0);
@@ -1347,12 +1269,6 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
}
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
int offset,
FeedbackVectorICSlot slot) {
@@ -1369,7 +1285,7 @@ void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofState typeof_state,
+ TypeofMode typeof_mode,
Label* slow) {
Register current = cp;
Register next = r1;
@@ -1418,7 +1334,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// All extension objects were empty and it is safe to use a normal global
// load machinery.
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
}
@@ -1455,9 +1371,8 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
+ TypeofMode typeof_mode,
+ Label* slow, Label* done) {
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
@@ -1465,7 +1380,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
// containing the eval.
Variable* var = proxy->var();
if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
+ EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
__ jmp(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
@@ -1488,22 +1403,35 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- // Inside typeof use a regular load, not a contextual load, to avoid
- // a reference error.
- CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+ if (var->IsGlobalSlot()) {
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ const int slot = var->index();
+ const int depth = scope()->ContextChainLength(var->scope());
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ LoadGlobalViaContextStub stub(isolate(), depth);
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+ } else {
+ __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
+ }
}
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
@@ -1515,7 +1443,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
context()->Plug(r0);
break;
}
@@ -1523,7 +1451,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
case VariableLocation::CONTEXT: {
- DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (var->binding_needs_init()) {
@@ -1554,8 +1482,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
} else if (var->is_this()) {
- CHECK(info_->function() != nullptr &&
- (info_->function()->kind() & kSubclassConstructor) != 0);
+ CHECK(info_->has_literal() &&
+ (info_->literal()->kind() & kSubclassConstructor) != 0);
// TODO(dslomov): implement 'this' hole check elimination.
skip_init_check = false;
} else {
@@ -1597,12 +1525,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
__ mov(r1, Operand(var->name()));
__ Push(cp, r1); // Context and name.
Runtime::FunctionId function_id =
- typeof_state == NOT_INSIDE_TYPEOF
+ typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
__ CallRuntime(function_id, 2);
@@ -1999,7 +1927,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression());
+ DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
SetExpressionPosition(expr, INSERT_BREAK);
@@ -2158,8 +2086,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend);
-
__ bind(&continuation);
+ __ RecordGeneratorContinuation();
__ jmp(&resume);
__ bind(&suspend);
@@ -2230,9 +2158,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(r0); // result
+
__ jmp(&l_suspend);
__ bind(&l_continuation);
+ __ RecordGeneratorContinuation();
__ jmp(&l_resume);
+
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + try_block_size;
__ ldr(r0, MemOperand(sp, generator_object_depth));
@@ -2269,6 +2200,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ mov(r1, r0);
__ str(r1, MemOperand(sp, 2 * kPointerSize));
+ SetCallPosition(expr, 1);
CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
__ CallStub(&stub);
@@ -2283,7 +2215,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
__ mov(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // r0=result.done
+ CallLoadIC(NOT_INSIDE_TYPEOF); // r0=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
__ cmp(r0, Operand(0));
@@ -2294,7 +2226,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
__ mov(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // r0=result.value
+ CallLoadIC(NOT_INSIDE_TYPEOF); // r0=result.value
context()->DropAndPlug(2, r0); // drop iter and g
break;
}
@@ -2446,7 +2378,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
__ mov(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL, language_mode());
+ CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
}
@@ -2638,11 +2570,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
}
- // prototype
- __ CallRuntime(Runtime::kToFastProperties, 1);
-
- // constructor
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ // Set both the prototype and constructor to have fast properties, and also
+ // freeze them in strong mode.
+ __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
}
@@ -2659,7 +2589,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
FeedbackVectorICSlot slot) {
- DCHECK(expr->IsValidReferenceExpression());
+ DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(prop);
@@ -2752,13 +2682,32 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorICSlot slot) {
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
__ ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
+ } else if (var->IsGlobalSlot()) {
+ // Global var, const, or let.
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ const int slot = var->index();
+ const int depth = scope()->ContextChainLength(var->scope());
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(r0));
+ StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ push(r0);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2790,6 +2739,20 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&const_error);
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ } else if (var->is_this() && op == Token::INIT_CONST) {
+ // Initializing assignment to const {this} needs a write barrier.
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label uninitialized_this;
+ MemOperand location = VarOperand(var, r1);
+ __ ldr(r3, location);
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ b(eq, &uninitialized_this);
+ __ mov(r0, Operand(var->name()));
+ __ Push(r0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -3103,7 +3066,7 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -3141,22 +3104,6 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
- Variable* this_var = super_ref->this_var()->var();
- GetVar(r1, this_var);
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
- Label uninitialized_this;
- __ b(eq, &uninitialized_this);
- __ mov(r0, Operand(this_var->name()));
- __ Push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&uninitialized_this);
-
- EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
-}
-
-
// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VariableProxy* callee = expr->expression()->AsVariableProxy();
@@ -3238,7 +3185,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -3310,7 +3257,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
// Load function and argument count into r1 and r0.
__ mov(r0, Operand(arg_count));
@@ -3338,9 +3285,6 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- VariableProxy* new_target_proxy = super_call_ref->new_target_var();
- VisitForStackValue(new_target_proxy);
-
EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
@@ -3353,7 +3297,11 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
+
+ // Load original constructor into r4.
+ VisitForAccumulatorValue(super_call_ref->new_target_var());
+ __ mov(r4, result_register());
// Load function and argument count into r1 and r0.
__ mov(r0, Operand(arg_count));
@@ -3375,11 +3323,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(r0);
}
@@ -3481,7 +3426,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3491,15 +3436,13 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
__ JumpIfSmi(r0, if_false);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ __ CompareObjectType(r0, r1, r1, SIMD128_VALUE_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ne, if_true, if_false, fall_through);
+ Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3883,33 +3826,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4055,19 +3971,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- MathPowStub stub(isolate(), MathPowStub::ON_STACK);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -4108,6 +4011,19 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into r0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(r0);
+}
+
+
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4231,18 +4147,6 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4287,6 +4191,9 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ CallRuntime(Runtime::kGetPrototype, 1);
__ Push(result_register());
+ // Load original constructor into r4.
+ __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
+
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -4345,55 +4252,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- DCHECK_NOT_NULL(args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort(kAttemptToUseUndefinedCache);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- context()->Plug(r0);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = r0;
- Register cache = r1;
- __ ldr(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
- __ ldr(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ ldr(cache,
- FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
-
- Label done, not_found;
- __ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
- // r2 now holds finger offset as a smi.
- __ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // r3 now points to the start of fixed array elements.
- __ ldr(r2, MemOperand::PointerAddressFromSmiKey(r3, r2, PreIndex));
- // Note side effect of PreIndex: r3 now points to the key of the pair.
- __ cmp(key, r2);
- __ b(ne, &not_found);
-
- __ ldr(r0, MemOperand(r3, kPointerSize));
- __ b(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCacheRT, 2);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4672,47 +4530,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
- // Assert: expr === CallRuntime("ReflectConstruct")
- DCHECK_EQ(1, expr->arguments()->length());
- CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
-
- ZoneList<Expression*>* args = call->arguments();
- DCHECK_EQ(3, args->length());
-
- SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Load ReflectConstruct function
- EmitLoadJSRuntimeFunction(call);
-
- // Push the target function under the receiver.
- __ ldr(ip, MemOperand(sp, 0));
- __ push(ip);
- __ str(r0, MemOperand(sp, kPointerSize));
-
- // Push super constructor
- EmitLoadSuperConstructor(super_call_ref);
- __ Push(result_register());
-
- // Push arguments array
- VisitForStackValue(args->at(1));
-
- // Push NewTarget
- DCHECK(args->at(2)->IsVariableProxy());
- VisitForStackValue(args->at(2));
-
- EmitCallJSRuntimeFunction(call);
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
-
- // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
- EmitInitializeThisAfterSuper(super_call_ref);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as the receiver.
Register receiver = LoadDescriptor::ReceiverRegister();
@@ -4724,7 +4541,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
__ mov(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -4732,7 +4549,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -4802,9 +4619,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ mov(r1, Operand(Smi::FromInt(language_mode())));
- __ push(r1);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy,
+ 2);
context()->Plug(r0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4815,9 +4633,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (var->IsUnallocatedOrGlobalSlot()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(SLOPPY)));
- __ Push(r2, r1, r0);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ Push(r2, r1);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
context()->Plug(r0);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4907,7 +4724,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpression());
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ CountOperation");
@@ -5189,12 +5006,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(r0, if_false);
- // Check for undetectable objects => false.
__ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
- __ b(ge, if_false);
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
+ Split(lt, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
@@ -5234,6 +5047,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(eq, if_true, if_false, fall_through);
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(check, factory->type##_string())) { \
+ __ JumpIfSmi(r0, if_false); \
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); \
+ __ CompareRoot(r0, Heap::k##Type##MapRootIndex); \
+ Split(eq, if_true, if_false, fall_through);
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
} else {
if (if_false != fall_through) __ jmp(if_false);
}
@@ -5370,21 +5193,21 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_script_scope() ||
- declaration_scope->is_module_scope()) {
+ Scope* closure_scope = scope()->ClosureScope();
+ if (closure_scope->is_script_scope() ||
+ closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ mov(ip, Operand(Smi::FromInt(0)));
- } else if (declaration_scope->is_eval_scope()) {
+ } else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
__ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
} else {
- DCHECK(declaration_scope->is_function_scope());
+ DCHECK(closure_scope->is_function_scope());
__ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
__ push(ip);
diff --git a/deps/v8/src/arm64/full-codegen-arm64.cc b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index 324bfb8160..73aaf46abd 100644
--- a/deps/v8/src/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -2,21 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/full-codegen.h"
+#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/parser.h"
#include "src/scopes.h"
#include "src/arm64/code-stubs-arm64.h"
+#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
namespace v8 {
@@ -112,7 +111,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ Debug("stop-at", __LINE__, BREAK);
}
#endif
@@ -152,7 +151,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
if (locals_count > 0) {
if (locals_count >= 128) {
@@ -324,7 +323,7 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !is_simple_parameter_list()) {
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
@@ -350,16 +349,14 @@ void FullCodeGenerator::Generate() {
} else {
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- DCHECK(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
- VisitVariableDeclaration(function);
- }
VisitDeclarations(scope()->declarations());
}
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
+
{
Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
@@ -410,10 +407,6 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- if (info_->is_debug()) {
- // Detect debug break requests as soon as possible.
- reset_value = FLAG_interrupt_budget >> 4;
- }
__ Mov(x2, Operand(profiling_counter_));
__ Mov(x3, Smi::FromInt(reset_value));
__ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
@@ -492,49 +485,27 @@ void FullCodeGenerator::EmitReturnSequence() {
EmitProfilingCounterReset();
__ Bind(&ok);
- // Make sure that the constant pool is not emitted inside of the return
- // sequence. This sequence can get patched when the debugger is used. See
- // debug-arm64.cc:BreakLocation::SetDebugBreakAtReturn().
- {
- InstructionAccurateScope scope(masm_,
- Assembler::kJSReturnSequenceInstructions);
- SetReturnPosition(function());
- __ RecordJSReturn();
- // This code is generated using Assembler methods rather than Macro
- // Assembler methods because it will be patched later on, and so the size
- // of the generated code must be consistent.
- const Register& current_sp = __ StackPointer();
- // Nothing ensures 16 bytes alignment here.
- DCHECK(!current_sp.Is(csp));
- __ mov(current_sp, fp);
- int no_frame_start = masm_->pc_offset();
- __ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex));
- // Drop the arguments and receiver and return.
- // TODO(all): This implementation is overkill as it supports 2**31+1
- // arguments, consider how to improve it without creating a security
- // hole.
- __ ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
- __ add(current_sp, current_sp, ip0);
- __ ret();
- int32_t arg_count = info_->scope()->num_parameters() + 1;
- __ dc64(kXRegSize * arg_count);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
+ SetReturnPosition(function());
+ const Register& current_sp = __ StackPointer();
+ // Nothing ensures 16 bytes alignment here.
+ DCHECK(!current_sp.Is(csp));
+ __ Mov(current_sp, fp);
+ int no_frame_start = masm_->pc_offset();
+ __ Ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex));
+ // Drop the arguments and receiver and return.
+ // TODO(all): This implementation is overkill as it supports 2**31+1
+ // arguments, consider how to improve it without creating a security
+ // hole.
+ __ ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
+ __ Add(current_sp, current_sp, ip0);
+ __ Ret();
+ int32_t arg_count = info_->scope()->num_parameters() + 1;
+ __ dc64(kXRegSize * arg_count);
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
@@ -542,15 +513,6 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
}
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
// Root values have no side effects.
}
@@ -708,10 +670,6 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
}
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
@@ -907,21 +865,21 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ Mov(x2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
DCHECK(IsDeclaredVariableMode(mode));
- PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY
- : NONE;
- __ Mov(x1, Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (hole_init) {
__ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
- __ Push(cp, x2, x1, x0);
+ __ Push(x2, x0);
} else {
// Pushing 0 (xzr) indicates no initial value.
- __ Push(cp, x2, x1, xzr);
+ __ Push(x2, xzr);
}
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ CallRuntime(IsImmutableVariableMode(mode)
+ ? Runtime::kDeclareReadOnlyLookupSlot
+ : Runtime::kDeclareLookupSlot,
+ 2);
break;
}
}
@@ -974,46 +932,16 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ Function Declaration");
__ Mov(x2, Operand(variable->name()));
- __ Mov(x1, Smi::FromInt(NONE));
- __ Push(cp, x2, x1);
+ __ Push(x2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
break;
}
}
}
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ Mov(x11, Operand(pairs));
@@ -1022,8 +950,8 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
flags = x10;
__ Mov(flags, Smi::FromInt(DeclareGlobalsFlags()));
}
- __ Push(cp, x11, flags);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ Push(x11, flags);
+ __ CallRuntime(Runtime::kDeclareGlobals, 2);
// Return value is ignored.
}
@@ -1156,8 +1084,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ JumpIfSmi(x0, &convert);
__ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, &done_convert, ge);
__ Bind(&convert);
- __ Push(x0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
__ Bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ Push(x0);
@@ -1336,12 +1264,6 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
}
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
int offset,
FeedbackVectorICSlot slot) {
@@ -1357,7 +1279,7 @@ void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofState typeof_state,
+ TypeofMode typeof_mode,
Label* slow) {
Register current = cp;
Register next = x10;
@@ -1401,7 +1323,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// All extension objects were empty and it is safe to use a normal global
// load machinery.
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
}
@@ -1436,9 +1358,8 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
+ TypeofMode typeof_mode,
+ Label* slow, Label* done) {
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
@@ -1446,7 +1367,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
// containing the eval.
Variable* var = proxy->var();
if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
+ EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
__ B(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
@@ -1468,22 +1389,35 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
- __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- // Inside typeof use a regular load, not a contextual load, to avoid
- // a reference error.
- CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+ if (var->IsGlobalSlot()) {
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ int const slot = var->index();
+ int const depth = scope()->ContextChainLength(var->scope());
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ Mov(LoadGlobalViaContextDescriptor::SlotRegister(), slot);
+ LoadGlobalViaContextStub stub(isolate(), depth);
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+ } else {
+ __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+ __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ Mov(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
+ CallLoadIC(typeof_mode);
+ }
}
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
@@ -1495,7 +1429,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "Global variable");
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
context()->Plug(x0);
break;
}
@@ -1503,7 +1437,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
case VariableLocation::CONTEXT: {
- DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
@@ -1535,8 +1469,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
} else if (var->is_this()) {
- CHECK(info_->function() != nullptr &&
- (info_->function()->kind() & kSubclassConstructor) != 0);
+ CHECK(info_->has_literal() &&
+ (info_->literal()->kind() & kSubclassConstructor) != 0);
// TODO(dslomov): implement 'this' hole check elimination.
skip_init_check = false;
} else {
@@ -1577,13 +1511,13 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Label done, slow;
// Generate code for loading from variables potentially shadowed by
// eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ Bind(&slow);
Comment cmnt(masm_, "Lookup variable");
__ Mov(x1, Operand(var->name()));
__ Push(cp, x1); // Context and name.
Runtime::FunctionId function_id =
- typeof_state == NOT_INSIDE_TYPEOF
+ typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
__ CallRuntime(function_id, 2);
@@ -1974,7 +1908,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression());
+ DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
SetExpressionPosition(expr, INSERT_BREAK);
@@ -2120,7 +2054,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
__ Mov(LoadDescriptor::SlotRegister(),
SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL, language_mode());
+ CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
}
@@ -2333,17 +2267,15 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
}
- // prototype
- __ CallRuntime(Runtime::kToFastProperties, 1);
-
- // constructor
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ // Set both the prototype and constructor to have fast properties, and also
+ // freeze them in strong mode.
+ __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
}
void FullCodeGenerator::EmitAssignment(Expression* expr,
FeedbackVectorICSlot slot) {
- DCHECK(expr->IsValidReferenceExpression());
+ DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(prop);
@@ -2439,13 +2371,32 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorICSlot slot) {
ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
// Global var, const, or let.
__ Mov(StoreDescriptor::NameRegister(), Operand(var->name()));
__ Ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
+ } else if (var->IsGlobalSlot()) {
+ // Global var, const, or let.
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ int const slot = var->index();
+ int const depth = scope()->ContextChainLength(var->scope());
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ Mov(StoreGlobalViaContextDescriptor::SlotRegister(), slot);
+ DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(x0));
+ StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ Push(x0);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2475,6 +2426,19 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ Bind(&const_error);
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ } else if (var->is_this() && op == Token::INIT_CONST) {
+ // Initializing assignment to const {this} needs a write barrier.
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label uninitialized_this;
+ MemOperand location = VarOperand(var, x1);
+ __ Ldr(x10, location);
+ __ JumpIfRoot(x10, Heap::kTheHoleValueRootIndex, &uninitialized_this);
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -2788,10 +2752,11 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- SetExpressionPosition(expr);
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ SetCallPosition(expr, arg_count);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
@@ -2832,21 +2797,6 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
- Variable* this_var = super_ref->this_var()->var();
- GetVar(x1, this_var);
- Label uninitialized_this;
- __ JumpIfRoot(x1, Heap::kTheHoleValueRootIndex, &uninitialized_this);
- __ Mov(x0, Operand(this_var->name()));
- __ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&uninitialized_this);
-
- EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
-}
-
-
// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VariableProxy* callee = expr->expression()->AsVariableProxy();
@@ -2907,26 +2857,26 @@ void FullCodeGenerator::VisitCall(Call* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- PushCalleeAndWithBaseObject(expr);
+ PushCalleeAndWithBaseObject(expr);
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ Peek(x10, (arg_count + 1) * kPointerSize);
- __ Push(x10);
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ Peek(x10, (arg_count + 1) * kPointerSize);
+ __ Push(x10);
+ EmitResolvePossiblyDirectEval(arg_count);
- // Touch up the stack with the resolved function.
- __ Poke(x0, (arg_count + 1) * kPointerSize);
+ // Touch up the stack with the resolved function.
+ __ Poke(x0, (arg_count + 1) * kPointerSize);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
// Call the evaluated function.
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
@@ -3001,7 +2951,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
// Load function and argument count into x1 and x0.
__ Mov(x0, arg_count);
@@ -3029,9 +2979,6 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- VariableProxy* new_target_proxy = super_call_ref->new_target_var();
- VisitForStackValue(new_target_proxy);
-
EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
@@ -3044,7 +2991,11 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
+
+ // Load original constructor into x4.
+ VisitForAccumulatorValue(super_call_ref->new_target_var());
+ __ Mov(x4, result_register());
// Load function and argument count into x1 and x0.
__ Mov(x0, arg_count);
@@ -3066,11 +3017,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(x0);
}
@@ -3169,8 +3117,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitIsUndetectableObject");
+void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3180,15 +3127,13 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
__ JumpIfSmi(x0, if_false);
- __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ Tst(x11, 1 << Map::kIsUndetectable);
+ __ CompareObjectType(x0, x10, x11, SIMD128_VALUE_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ne, if_true, if_false, fall_through);
+ Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3593,33 +3538,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitValueOf");
ZoneList<Expression*>* args = expr->arguments();
@@ -3759,18 +3677,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the MathPow stub.
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- MathPowStub stub(isolate(), MathPowStub::ON_STACK);
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3813,6 +3719,19 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into x0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3942,18 +3861,6 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
ZoneList<Expression*>* args = expr->arguments();
@@ -3998,6 +3905,9 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ CallRuntime(Runtime::kGetPrototype, 1);
__ Push(result_register());
+ // Load original constructor into x4.
+ __ Peek(x4, 1 * kPointerSize);
+
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
__ Ldr(x11, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -4055,54 +3965,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- DCHECK_NOT_NULL(args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort(kAttemptToUseUndefinedCache);
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- context()->Plug(x0);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = x0;
- Register cache = x1;
- __ Ldr(cache, GlobalObjectMemOperand());
- __ Ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
- __ Ldr(cache, ContextMemOperand(cache,
- Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ Ldr(cache,
- FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- Label done;
- __ Ldrsw(x2, UntagSmiFieldMemOperand(cache,
- JSFunctionResultCache::kFingerOffset));
- __ Add(x3, cache, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(x3, x3, Operand(x2, LSL, kPointerSizeLog2));
-
- // Load the key and data from the cache.
- __ Ldp(x2, x3, MemOperand(x3));
-
- __ Cmp(key, x2);
- __ CmovX(x0, x3, eq);
- __ B(eq, &done);
-
- // Call runtime to perform the lookup.
- __ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCacheRT, 2);
-
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4364,46 +4226,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
- // Assert: expr === CallRuntime("ReflectConstruct")
- DCHECK_EQ(1, expr->arguments()->length());
- CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
-
- ZoneList<Expression*>* args = call->arguments();
- DCHECK_EQ(3, args->length());
-
- SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Load ReflectConstruct function
- EmitLoadJSRuntimeFunction(call);
-
- // Push the target function under the receiver.
- __ Pop(x10);
- __ Push(x0, x10);
-
- // Push super constructor
- EmitLoadSuperConstructor(super_call_ref);
- __ Push(result_register());
-
- // Push arguments array
- VisitForStackValue(args->at(1));
-
- // Push NewTarget
- DCHECK(args->at(2)->IsVariableProxy());
- VisitForStackValue(args->at(2));
-
- EmitCallJSRuntimeFunction(call);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, x0);
-
- // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
- EmitInitializeThisAfterSuper(super_call_ref);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as the receiver.
__ Ldr(x10, GlobalObjectMemOperand());
@@ -4416,7 +4238,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ Mov(LoadDescriptor::NameRegister(), Operand(name));
__ Mov(LoadDescriptor::SlotRegister(),
SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -4424,7 +4246,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ Peek(x1, (arg_count + 1) * kPointerSize);
__ CallStub(&stub);
@@ -4492,9 +4314,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ Mov(x10, Smi::FromInt(language_mode()));
- __ Push(x10);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy,
+ 2);
context()->Plug(x0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4505,9 +4328,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (var->IsUnallocatedOrGlobalSlot()) {
__ Ldr(x12, GlobalObjectMemOperand());
__ Mov(x11, Operand(var->name()));
- __ Mov(x10, Smi::FromInt(SLOPPY));
- __ Push(x12, x11, x10);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ Push(x12, x11);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
context()->Plug(x0);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4596,7 +4418,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpression());
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ CountOperation");
@@ -4880,11 +4702,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (String::Equals(check, factory->string_string())) {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof string_string");
__ JumpIfSmi(x0, if_false);
- // Check for undetectable objects => false.
- __ JumpIfObjectType(x0, x0, x1, FIRST_NONSTRING_TYPE, if_false, ge);
- __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
- __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_true, if_false,
- fall_through);
+ __ CompareObjectType(x0, x0, x1, FIRST_NONSTRING_TYPE);
+ Split(lt, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->symbol_string())) {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof symbol_string");
__ JumpIfSmi(x0, if_false);
@@ -4912,7 +4731,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfObjectType(x0, x10, x11, JS_FUNCTION_TYPE, if_true);
__ CompareAndSplit(x11, JS_FUNCTION_PROXY_TYPE, eq, if_true, if_false,
fall_through);
-
} else if (String::Equals(check, factory->object_string())) {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
__ JumpIfSmi(x0, if_false);
@@ -4928,7 +4746,18 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ TestAndSplit(x10, 1 << Map::kIsUndetectable, if_true, if_false,
fall_through);
-
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(check, factory->type##_string())) { \
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof " \
+ #type "_string"); \
+ __ JumpIfSmi(x0, if_true); \
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset)); \
+ __ CompareRoot(x0, Heap::k##Type##MapRootIndex); \
+ Split(eq, if_true, if_false, fall_through);
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
} else {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof other");
if (if_false != fall_through) __ B(if_false);
@@ -5067,11 +4896,11 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label suspend, continuation, post_runtime, resume;
__ B(&suspend);
-
// TODO(jbramley): This label is bound here because the following code
// looks at its pos(). Is it possible to do something more efficient here,
// perhaps using Adr?
__ Bind(&continuation);
+ __ RecordGeneratorContinuation();
__ B(&resume);
__ Bind(&suspend);
@@ -5142,12 +4971,13 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ Push(x0); // result
- __ B(&l_suspend);
+ __ B(&l_suspend);
// TODO(jbramley): This label is bound here because the following code
// looks at its pos(). Is it possible to do something more efficient here,
// perhaps using Adr?
__ Bind(&l_continuation);
+ __ RecordGeneratorContinuation();
__ B(&l_resume);
__ Bind(&l_suspend);
@@ -5186,6 +5016,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ Mov(x1, x0);
__ Poke(x1, 2 * kPointerSize);
+ SetCallPosition(expr, 1);
CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
__ CallStub(&stub);
@@ -5200,7 +5031,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
__ Mov(LoadDescriptor::SlotRegister(),
SmiFromSlot(expr->DoneFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL); // x0=result.done
+ CallLoadIC(NOT_INSIDE_TYPEOF); // x0=result.done
// The ToBooleanStub argument (result.done) is in x0.
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
@@ -5211,7 +5042,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
__ Mov(LoadDescriptor::SlotRegister(),
SmiFromSlot(expr->ValueFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL); // x0=result.value
+ CallLoadIC(NOT_INSIDE_TYPEOF); // x0=result.value
context()->DropAndPlug(2, x0); // drop iter and g
break;
}
@@ -5394,23 +5225,23 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_script_scope() ||
- declaration_scope->is_module_scope()) {
+ Scope* closure_scope = scope()->ClosureScope();
+ if (closure_scope->is_script_scope() ||
+ closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
DCHECK(kSmiTag == 0);
__ Push(xzr);
- } else if (declaration_scope->is_eval_scope()) {
+ } else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
__ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX));
__ Push(x10);
} else {
- DCHECK(declaration_scope->is_function_scope());
+ DCHECK(closure_scope->is_function_scope());
__ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(x10);
}
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen/full-codegen.cc
index 8ca40ccacd..bb7b637d42 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen/full-codegen.cc
@@ -2,16 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/ast.h"
#include "src/ast-numbering.h"
#include "src/code-factory.h"
#include "src/codegen.h"
#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/full-codegen.h"
-#include "src/liveedit.h"
+#include "src/debug/debug.h"
+#include "src/debug/liveedit.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/macro-assembler.h"
#include "src/prettyprinter.h"
#include "src/scopeinfo.h"
@@ -59,7 +57,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
cgen.PopulateHandlerTable(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_has_reloc_info_for_serialization(info->will_serialize());
- code->set_compiled_optimizable(info->IsOptimizable());
code->set_allow_osr_at_loop_nesting_level(0);
code->set_profiler_ticks(0);
code->set_back_edge_table_offset(table_offset);
@@ -205,11 +202,11 @@ void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
}
-void FullCodeGenerator::CallLoadIC(ContextualMode contextual_mode,
+void FullCodeGenerator::CallLoadIC(TypeofMode typeof_mode,
LanguageMode language_mode,
TypeFeedbackId id) {
Handle<Code> ic =
- CodeFactory::LoadIC(isolate(), contextual_mode, language_mode).code();
+ CodeFactory::LoadIC(isolate(), typeof_mode, language_mode).code();
CallIC(ic, id);
}
@@ -274,6 +271,26 @@ bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) {
}
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ // For simplicity we always test the accumulator register.
+ codegen()->GetVar(result_register(), var);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
void FullCodeGenerator::EffectContext::Plug(Register reg) const {
}
@@ -296,6 +313,9 @@ void FullCodeGenerator::TestContext::Plug(Register reg) const {
}
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {}
+
+
void FullCodeGenerator::EffectContext::PlugTOS() const {
__ Drop(1);
}
@@ -394,6 +414,41 @@ void FullCodeGenerator::VisitDeclarations(
}
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
+ // TODO(rossberg)
+ break;
+
+ case VariableLocation::CONTEXT: {
+ Comment cmnt(masm_, "[ ImportDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ // TODO(rossberg)
+ break;
+ }
+
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::LOOKUP:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+ // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr);
+}
+
+
int FullCodeGenerator::DeclareGlobalsFlags() {
DCHECK(DeclareGlobalsLanguageMode::is_valid(language_mode()));
return DeclareGlobalsEvalFlag::encode(is_eval()) |
@@ -402,6 +457,59 @@ int FullCodeGenerator::DeclareGlobalsFlags() {
}
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ SubStringStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallStub(&stub);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+ // Load the arguments on the stack and call the stub.
+ RegExpExecStub stub(isolate());
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 4);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
+ __ CallStub(&stub);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+ // Load the arguments on the stack and call the runtime function.
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ MathPowStub stub(isolate(), MathPowStub::ON_STACK);
+ __ CallStub(&stub);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringCompareStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(result_register());
+}
+
+
bool RecordStatementPosition(MacroAssembler* masm, int pos) {
if (pos == RelocInfo::kNoPosition) return false;
masm->positions_recorder()->RecordStatementPosition(pos);
@@ -424,6 +532,10 @@ void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
RecordStatementPosition(masm_, fun->end_position() - 1);
+ if (info_->is_debug()) {
+ // Always emit a debug break slot before a return.
+ DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
+ }
}
@@ -433,7 +545,7 @@ void FullCodeGenerator::SetStatementPosition(
bool recorded = RecordStatementPosition(masm_, stmt->position());
if (recorded && insert_break == INSERT_BREAK && info_->is_debug() &&
!stmt->IsDebuggerStatement()) {
- DebugCodegen::GenerateSlot(masm_);
+ DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
}
}
@@ -443,7 +555,7 @@ void FullCodeGenerator::SetExpressionPosition(
if (expr->position() == RelocInfo::kNoPosition) return;
bool recorded = RecordPosition(masm_, expr->position());
if (recorded && insert_break == INSERT_BREAK && info_->is_debug()) {
- DebugCodegen::GenerateSlot(masm_);
+ DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
}
}
@@ -451,7 +563,31 @@ void FullCodeGenerator::SetExpressionPosition(
void FullCodeGenerator::SetExpressionAsStatementPosition(Expression* expr) {
if (expr->position() == RelocInfo::kNoPosition) return;
bool recorded = RecordStatementPosition(masm_, expr->position());
- if (recorded && info_->is_debug()) DebugCodegen::GenerateSlot(masm_);
+ if (recorded && info_->is_debug()) {
+ DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
+ }
+}
+
+
+void FullCodeGenerator::SetCallPosition(Expression* expr, int argc) {
+ if (expr->position() == RelocInfo::kNoPosition) return;
+ RecordPosition(masm_, expr->position());
+ if (info_->is_debug()) {
+ // Always emit a debug break slot before a call.
+ DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_CALL,
+ argc);
+ }
+}
+
+
+void FullCodeGenerator::SetConstructCallPosition(Expression* expr) {
+ if (expr->position() == RelocInfo::kNoPosition) return;
+ RecordPosition(masm_, expr->position());
+ if (info_->is_debug()) {
+ // Always emit a debug break slot before a construct call.
+ DebugCodegen::GenerateSlot(masm_,
+ RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL);
+ }
}
@@ -1157,11 +1293,12 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
VisitForStackValue(lit->constructor());
- __ Push(script());
__ Push(Smi::FromInt(lit->start_position()));
__ Push(Smi::FromInt(lit->end_position()));
- __ CallRuntime(Runtime::kDefineClass, 6);
+ __ CallRuntime(is_strong(language_mode()) ? Runtime::kDefineClassStrong
+ : Runtime::kDefineClass,
+ 5);
PrepareForBailoutForId(lit->CreateLiteralId(), TOS_REG);
int store_slot_index = 0;
@@ -1169,9 +1306,11 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
if (lit->scope() != NULL) {
DCHECK_NOT_NULL(lit->class_variable_proxy());
- FeedbackVectorICSlot slot = FLAG_vector_stores
- ? lit->GetNthSlot(store_slot_index++)
- : FeedbackVectorICSlot::Invalid();
+ FeedbackVectorICSlot slot =
+ FLAG_vector_stores &&
+ lit->class_variable_proxy()->var()->IsUnallocated()
+ ? lit->GetNthSlot(store_slot_index++)
+ : FeedbackVectorICSlot::Invalid();
EmitVariableAssignment(lit->class_variable_proxy()->var(),
Token::INIT_CONST, slot);
}
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen/full-codegen.h
index b4294f5856..34a5dc0454 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen/full-codegen.h
@@ -2,10 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FULL_CODEGEN_H_
-#define V8_FULL_CODEGEN_H_
-
-#include "src/v8.h"
+#ifndef V8_FULL_CODEGEN_FULL_CODEGEN_H_
+#define V8_FULL_CODEGEN_FULL_CODEGEN_H_
#include "src/allocation.h"
#include "src/assert-scope.h"
@@ -44,7 +42,7 @@ class FullCodeGenerator: public AstVisitor {
globals_(NULL),
context_(NULL),
bailout_entries_(info->HasDeoptimizationSupport()
- ? info->function()->ast_node_count()
+ ? info->literal()->ast_node_count()
: 0,
info->zone()),
back_edges_(2, info->zone()),
@@ -511,8 +509,8 @@ class FullCodeGenerator: public AstVisitor {
F(ObjectEquals) \
F(IsObject) \
F(IsFunction) \
- F(IsUndetectableObject) \
F(IsSpecObject) \
+ F(IsSimdValue) \
F(IsStringWrapperSafeForDefaultValueOf) \
F(MathPow) \
F(IsMinusZero) \
@@ -529,10 +527,9 @@ class FullCodeGenerator: public AstVisitor {
F(StringCompare) \
F(RegExpExec) \
F(RegExpConstructResult) \
- F(GetFromCache) \
F(NumberToString) \
- F(DebugIsActive) \
- F(CallSuperWithSpread)
+ F(ToObject) \
+ F(DebugIsActive)
#define GENERATOR_DECLARATION(Name) void Emit##Name(CallRuntime* call);
FOR_EACH_FULL_CODE_INTRINSIC(GENERATOR_DECLARATION)
@@ -545,16 +542,13 @@ class FullCodeGenerator: public AstVisitor {
// Platform-specific code for loading variables.
void EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofState typeof_state,
- Label* slow);
+ TypeofMode typeof_mode, Label* slow);
MemOperand ContextSlotOperandCheckExtensions(Variable* var, Label* slow);
- void EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofState typeof_state,
- Label* slow,
- Label* done);
- void EmitGlobalVariableLoad(VariableProxy* proxy, TypeofState typeof_state);
+ void EmitDynamicLookupFastCase(VariableProxy* proxy, TypeofMode typeof_mode,
+ Label* slow, Label* done);
+ void EmitGlobalVariableLoad(VariableProxy* proxy, TypeofMode typeof_mode);
void EmitVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
void EmitAccessor(Expression* expression);
@@ -647,14 +641,12 @@ class FullCodeGenerator: public AstVisitor {
FeedbackVectorICSlot slot = FeedbackVectorICSlot::Invalid());
void EmitLoadSuperConstructor(SuperCallReference* super_call_ref);
- void EmitInitializeThisAfterSuper(
- SuperCallReference* super_call_ref,
- FeedbackVectorICSlot slot = FeedbackVectorICSlot::Invalid());
void CallIC(Handle<Code> code,
TypeFeedbackId id = TypeFeedbackId::None());
- void CallLoadIC(ContextualMode mode, LanguageMode language_mode = SLOPPY,
+ // Inside typeof reference errors are never thrown.
+ void CallLoadIC(TypeofMode typeof_mode, LanguageMode language_mode = SLOPPY,
TypeFeedbackId id = TypeFeedbackId::None());
void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
@@ -676,6 +668,10 @@ class FullCodeGenerator: public AstVisitor {
// This is used in loop headers where we want to break for each iteration.
void SetExpressionAsStatementPosition(Expression* expr);
+ void SetCallPosition(Expression* expr, int argc);
+
+ void SetConstructCallPosition(Expression* expr);
+
// Non-local control flow support.
void EnterTryBlock(int handler_index, Label* handler);
void ExitTryBlock(int handler_index);
@@ -701,8 +697,9 @@ class FullCodeGenerator: public AstVisitor {
bool is_eval() { return info_->is_eval(); }
bool is_native() { return info_->is_native(); }
LanguageMode language_mode() { return function()->language_mode(); }
- bool is_simple_parameter_list() { return info_->is_simple_parameter_list(); }
- FunctionLiteral* function() { return info_->function(); }
+ bool has_simple_parameters() { return info_->has_simple_parameters(); }
+ // TODO(titzer): rename this to literal().
+ FunctionLiteral* function() { return info_->literal(); }
Scope* scope() { return scope_; }
static Register result_register();
@@ -1088,4 +1085,4 @@ class BackEdgeTable {
} } // namespace v8::internal
-#endif // V8_FULL_CODEGEN_H_
+#endif // V8_FULL_CODEGEN_FULL_CODEGEN_H_
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index 535f2c2c63..5aa6409441 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/full-codegen.h"
+#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/ia32/frames-ia32.h"
#include "src/ic/ic.h"
#include "src/parser.h"
#include "src/scopes.h"
@@ -102,7 +101,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
@@ -140,7 +139,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(function()->kind()) || locals_count == 0);
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -320,7 +319,7 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !is_simple_parameter_list()) {
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
@@ -347,18 +346,14 @@ void FullCodeGenerator::Generate() {
} else {
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- DCHECK(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
- VisitVariableDeclaration(function);
- }
VisitDeclarations(scope()->declarations());
}
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
+
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
@@ -464,42 +459,19 @@ void FullCodeGenerator::EmitReturnSequence() {
__ pop(eax);
EmitProfilingCounterReset();
__ bind(&ok);
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
+
SetReturnPosition(function());
- __ RecordJSReturn();
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ mov(esp, ebp);
int no_frame_start = masm_->pc_offset();
- __ pop(ebp);
+ __ leave();
int arg_count = info_->scope()->num_parameters() + 1;
int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, ecx);
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- DCHECK(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand operand = codegen()->VarOperand(var, result_register());
@@ -508,14 +480,6 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
}
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
UNREACHABLE(); // Not used on IA32.
}
@@ -666,10 +630,6 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
}
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
Handle<Object> value = flag
? isolate()->factory()->true_value()
@@ -853,13 +813,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
- __ push(esi);
__ push(Immediate(variable->name()));
// VariableDeclaration nodes are always introduced in one of four modes.
DCHECK(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
@@ -869,7 +825,10 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
}
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ CallRuntime(IsImmutableVariableMode(mode)
+ ? Runtime::kDeclareReadOnlyLookupSlot
+ : Runtime::kDeclareLookupSlot,
+ 2);
break;
}
}
@@ -919,52 +878,20 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(esi);
__ push(Immediate(variable->name()));
- __ push(Immediate(Smi::FromInt(NONE)));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
break;
}
}
}
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
- __ push(esi); // The context is the first argument.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kDeclareGlobals, 2);
// Return value is ignored.
}
@@ -1097,8 +1024,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &done_convert, Label::kNear);
__ bind(&convert);
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(eax);
@@ -1275,12 +1202,6 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
}
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
int offset,
FeedbackVectorICSlot slot) {
@@ -1297,7 +1218,7 @@ void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofState typeof_state,
+ TypeofMode typeof_mode,
Label* slow) {
Register context = esi;
Register temp = edx;
@@ -1346,7 +1267,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// All extension objects were empty and it is safe to use a normal global
// load machinery.
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
}
@@ -1381,9 +1302,8 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
+ TypeofMode typeof_mode,
+ Label* slow, Label* done) {
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
@@ -1391,7 +1311,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
// containing the eval.
Variable* var = proxy->var();
if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
+ EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
__ jmp(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
@@ -1413,22 +1333,36 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), var->name());
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- // Inside typeof use a regular load, not a contextual load, to avoid
- // a reference error.
- CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+ if (var->IsGlobalSlot()) {
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ int const slot = var->index();
+ int const depth = scope()->ContextChainLength(var->scope());
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ Move(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
+ LoadGlobalViaContextStub stub(isolate(), depth);
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+
+ } else {
+ __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), var->name());
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
+ }
}
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
SetExpressionPosition(proxy);
PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
Variable* var = proxy->var();
@@ -1439,7 +1373,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
context()->Plug(eax);
break;
}
@@ -1447,7 +1381,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
case VariableLocation::CONTEXT: {
- DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (var->binding_needs_init()) {
@@ -1478,8 +1412,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
} else if (var->is_this()) {
- CHECK(info_->function() != nullptr &&
- (info_->function()->kind() & kSubclassConstructor) != 0);
+ CHECK(function() != nullptr &&
+ (function()->kind() & kSubclassConstructor) != 0);
// TODO(dslomov): implement 'this' hole check elimination.
skip_init_check = false;
} else {
@@ -1520,12 +1454,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
__ push(esi); // Context.
__ push(Immediate(var->name()));
Runtime::FunctionId function_id =
- typeof_state == NOT_INSIDE_TYPEOF
+ typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
__ CallRuntime(function_id, 2);
@@ -1926,7 +1860,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression());
+ DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
SetExpressionPosition(expr, INSERT_BREAK);
@@ -2080,8 +2014,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend);
-
__ bind(&continuation);
+ __ RecordGeneratorContinuation();
__ jmp(&resume);
__ bind(&suspend);
@@ -2154,9 +2088,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(eax); // result
+
__ jmp(&l_suspend);
__ bind(&l_continuation);
+ __ RecordGeneratorContinuation();
__ jmp(&l_resume);
+
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + try_block_size;
__ mov(eax, Operand(esp, generator_object_depth));
@@ -2194,6 +2131,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
+ SetCallPosition(expr, 1);
CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
__ CallStub(&stub);
@@ -2208,7 +2146,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
isolate()->factory()->done_string()); // "done"
__ mov(LoadDescriptor::SlotRegister(),
Immediate(SmiFromSlot(expr->DoneFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // result.done in eax
+ CallLoadIC(NOT_INSIDE_TYPEOF); // result.done in eax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
__ test(eax, eax);
@@ -2220,7 +2158,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
isolate()->factory()->value_string()); // "value"
__ mov(LoadDescriptor::SlotRegister(),
Immediate(SmiFromSlot(expr->ValueFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // result.value in eax
+ CallLoadIC(NOT_INSIDE_TYPEOF); // result.value in eax
context()->DropAndPlug(2, eax); // drop iter and g
break;
}
@@ -2359,7 +2297,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
__ mov(LoadDescriptor::SlotRegister(),
Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL, language_mode());
+ CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
}
@@ -2545,11 +2483,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
}
- // prototype
- __ CallRuntime(Runtime::kToFastProperties, 1);
-
- // constructor
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ // Set both the prototype and constructor to have fast properties, and also
+ // freeze them in strong mode.
+ __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
}
@@ -2566,7 +2502,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
FeedbackVectorICSlot slot) {
- DCHECK(expr->IsValidReferenceExpression());
+ DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(prop);
@@ -2657,13 +2593,33 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorICSlot slot) {
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), var->name());
__ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
+ } else if (var->IsGlobalSlot()) {
+ // Global var, const, or let.
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ int const slot = var->index();
+ int const depth = scope()->ContextChainLength(var->scope());
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ Move(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
+ DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(eax));
+ StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ Push(eax);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2692,6 +2648,19 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&const_error);
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ } else if (var->is_this() && op == Token::INIT_CONST) {
+ // Initializing assignment to const {this} needs a write barrier.
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label uninitialized_this;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(equal, &uninitialized_this);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -2995,7 +2964,7 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
@@ -3034,22 +3003,6 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperCallReference* super_call_ref, FeedbackVectorICSlot slot) {
- Variable* this_var = super_call_ref->this_var()->var();
- GetVar(ecx, this_var);
- __ cmp(ecx, isolate()->factory()->the_hole_value());
-
- Label uninitialized_this;
- __ j(equal, &uninitialized_this);
- __ push(Immediate(this_var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&uninitialized_this);
-
- EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
-}
-
-
// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VariableProxy* callee = expr->expression()->AsVariableProxy();
@@ -3126,7 +3079,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -3197,7 +3150,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
// Load function and argument count into edi and eax.
__ Move(eax, Immediate(arg_count));
@@ -3225,9 +3178,6 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- VariableProxy* new_target_proxy = super_call_ref->new_target_var();
- VisitForStackValue(new_target_proxy);
-
EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
@@ -3240,7 +3190,11 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
+
+ // Load original constructor into ecx.
+ VisitForAccumulatorValue(super_call_ref->new_target_var());
+ __ mov(ecx, result_register());
// Load function and argument count into edi and eax.
__ Move(eax, Immediate(arg_count));
@@ -3262,11 +3216,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
__ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(eax);
}
@@ -3367,7 +3318,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3377,15 +3328,13 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsUndetectable));
+ __ CmpObjectType(eax, SIMD128_VALUE_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(not_zero, if_true, if_false, fall_through);
+ Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3777,33 +3726,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3951,19 +3873,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- MathPowStub stub(isolate(), MathPowStub::ON_STACK);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -4006,6 +3915,19 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into eax and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4132,19 +4054,6 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4189,11 +4098,14 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ CallRuntime(Runtime::kGetPrototype, 1);
__ push(result_register());
+ // Load original constructor into ecx.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ mov(ebx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame);
// default constructor has no arguments, so no adaptor frame means no args.
__ mov(eax, Immediate(0));
@@ -4202,17 +4114,17 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
// Copy arguments from adaptor frame.
{
__ bind(&adaptor_frame);
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(ecx);
+ __ mov(ebx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(ebx);
- __ mov(eax, ecx);
- __ lea(edx, Operand(edx, ecx, times_pointer_size,
+ __ mov(eax, ebx);
+ __ lea(edx, Operand(edx, ebx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
Label loop;
__ bind(&loop);
__ push(Operand(edx, -1 * kPointerSize));
__ sub(edx, Immediate(kPointerSize));
- __ dec(ecx);
+ __ dec(ebx);
__ j(not_zero, &loop);
}
@@ -4244,55 +4156,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- DCHECK_NOT_NULL(args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort(kAttemptToUseUndefinedCache);
- __ mov(eax, isolate()->factory()->undefined_value());
- context()->Plug(eax);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = eax;
- Register cache = ebx;
- Register tmp = ecx;
- __ mov(cache, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
- __ mov(cache,
- FieldOperand(cache, GlobalObject::kNativeContextOffset));
- __ mov(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ mov(cache,
- FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- Label done, not_found;
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
- // tmp now holds finger offset as a smi.
- __ cmp(key, FixedArrayElementOperand(cache, tmp));
- __ j(not_equal, &not_found);
-
- __ mov(eax, FixedArrayElementOperand(cache, tmp, 1));
- __ jmp(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ push(cache);
- __ push(key);
- __ CallRuntime(Runtime::kGetFromCacheRT, 2);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4605,46 +4468,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
- // Assert: expr == CallRuntime("ReflectConstruct")
- DCHECK_EQ(1, expr->arguments()->length());
- CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
-
- ZoneList<Expression*>* args = call->arguments();
- DCHECK_EQ(3, args->length());
-
- SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Load ReflectConstruct function
- EmitLoadJSRuntimeFunction(call);
-
- // Push the target function under the receiver
- __ push(Operand(esp, 0));
- __ mov(Operand(esp, kPointerSize), eax);
-
- // Push super constructor
- EmitLoadSuperConstructor(super_call_ref);
- __ Push(result_register());
-
- // Push arguments array
- VisitForStackValue(args->at(1));
-
- // Push NewTarget
- DCHECK(args->at(2)->IsVariableProxy());
- VisitForStackValue(args->at(2));
-
- EmitCallJSRuntimeFunction(call);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
-
- // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
- EmitInitializeThisAfterSuper(super_call_ref);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as receiver.
__ mov(eax, GlobalObjectOperand());
@@ -4655,7 +4478,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ mov(LoadDescriptor::NameRegister(), Immediate(expr->name()));
__ mov(LoadDescriptor::SlotRegister(),
Immediate(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -4663,7 +4486,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -4731,8 +4554,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy,
+ 2);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4743,8 +4568,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (var->IsUnallocatedOrGlobalSlot()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(SLOPPY)));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
context()->Plug(eax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global variables is false. 'this' is
@@ -4840,7 +4664,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpression());
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ CountOperation");
@@ -5124,11 +4948,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
- __ j(above_equal, if_false);
- // Check for undetectable objects => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- Split(zero, if_true, if_false, fall_through);
+ Split(below, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, SYMBOL_TYPE, edx);
@@ -5166,6 +4986,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
Split(zero, if_true, if_false, fall_through);
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(check, factory->type##_string())) { \
+ __ JumpIfSmi(eax, if_false); \
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset), \
+ isolate()->factory()->type##_map()); \
+ Split(equal, if_true, if_false, fall_through);
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
} else {
if (if_false != fall_through) __ jmp(if_false);
}
@@ -5303,21 +5133,21 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_script_scope() ||
- declaration_scope->is_module_scope()) {
+ Scope* closure_scope = scope()->ClosureScope();
+ if (closure_scope->is_script_scope() ||
+ closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ push(Immediate(Smi::FromInt(0)));
- } else if (declaration_scope->is_eval_scope()) {
+ } else if (closure_scope->is_eval_scope()) {
// Contexts nested inside eval code have the same closure as the context
// calling eval, not the anonymous closure containing the eval code.
// Fetch it from the context.
__ push(ContextOperand(esi, Context::CLOSURE_INDEX));
} else {
- DCHECK(declaration_scope->is_function_scope());
+ DCHECK(closure_scope->is_function_scope());
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
}
diff --git a/deps/v8/src/full-codegen/mips/OWNERS b/deps/v8/src/full-codegen/mips/OWNERS
new file mode 100644
index 0000000000..5508ba626f
--- /dev/null
+++ b/deps/v8/src/full-codegen/mips/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
index 841ee4b995..c8da77fba2 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
// Note on Mips implementation:
@@ -18,8 +16,8 @@
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/full-codegen.h"
+#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/parser.h"
#include "src/scopes.h"
@@ -122,7 +120,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
@@ -158,7 +156,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
@@ -337,7 +335,7 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiever and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !is_simple_parameter_list()) {
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
@@ -363,18 +361,14 @@ void FullCodeGenerator::Generate() {
} else {
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- DCHECK(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
- VisitVariableDeclaration(function);
- }
VisitDeclarations(scope()->declarations());
}
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
+
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
@@ -494,11 +488,6 @@ void FullCodeGenerator::EmitReturnSequence() {
EmitProfilingCounterReset();
__ bind(&ok);
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
@@ -507,7 +496,6 @@ void FullCodeGenerator::EmitReturnSequence() {
int32_t arg_count = info_->scope()->num_parameters() + 1;
int32_t sp_delta = arg_count * kPointerSize;
SetReturnPosition(function());
- __ RecordJSReturn();
masm_->mov(sp, fp);
int no_frame_start = masm_->pc_offset();
masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
@@ -515,28 +503,10 @@ void FullCodeGenerator::EmitReturnSequence() {
masm_->Jump(ra);
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
-
-#ifdef DEBUG
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- DCHECK(Assembler::kJSReturnSequenceInstructions <=
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
}
}
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
@@ -544,14 +514,6 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
}
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
}
@@ -712,10 +674,6 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
}
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
@@ -909,22 +867,21 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ li(a2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
DCHECK(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ li(a1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (hole_init) {
__ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
- __ Push(cp, a2, a1, a0);
} else {
DCHECK(Smi::FromInt(0) == 0);
__ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value.
- __ Push(cp, a2, a1, a0);
}
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ Push(a2, a0);
+ __ CallRuntime(IsImmutableVariableMode(mode)
+ ? Runtime::kDeclareReadOnlyLookupSlot
+ : Runtime::kDeclareLookupSlot,
+ 2);
break;
}
}
@@ -977,53 +934,22 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ li(a2, Operand(variable->name()));
- __ li(a1, Operand(Smi::FromInt(NONE)));
- __ Push(cp, a2, a1);
+ __ Push(a2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
break;
}
}
}
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
- // The context is the first argument.
__ li(a1, Operand(pairs));
__ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
- __ Push(cp, a1, a0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ Push(a1, a0);
+ __ CallRuntime(Runtime::kDeclareGlobals, 2);
// Return value is ignored.
}
@@ -1157,8 +1083,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ GetObjectType(a0, a1, a1);
__ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
__ bind(&convert);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
__ mov(a0, v0);
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
@@ -1342,12 +1268,6 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
}
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
int offset,
FeedbackVectorICSlot slot) {
@@ -1364,7 +1284,7 @@ void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofState typeof_state,
+ TypeofMode typeof_mode,
Label* slow) {
Register current = cp;
Register next = a1;
@@ -1410,7 +1330,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// All extension objects were empty and it is safe to use a normal global
// load machinery.
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
}
@@ -1445,9 +1365,8 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
+ TypeofMode typeof_mode,
+ Label* slow, Label* done) {
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
@@ -1455,7 +1374,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
// containing the eval.
Variable* var = proxy->var();
if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
+ EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
__ Branch(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
@@ -1480,22 +1399,36 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- // Inside typeof use a regular load, not a contextual load, to avoid
- // a reference error.
- CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+ if (var->IsGlobalSlot()) {
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ int const slot = var->index();
+ int const depth = scope()->ContextChainLength(var->scope());
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ LoadGlobalViaContextStub stub(isolate(), depth);
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+
+ } else {
+ __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
+ }
}
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
@@ -1507,7 +1440,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
context()->Plug(v0);
break;
}
@@ -1515,7 +1448,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
case VariableLocation::CONTEXT: {
- DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (var->binding_needs_init()) {
@@ -1546,8 +1479,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
} else if (var->is_this()) {
- CHECK(info_->function() != nullptr &&
- (info_->function()->kind() & kSubclassConstructor) != 0);
+ CHECK(info_->has_literal() &&
+ (info_->literal()->kind() & kSubclassConstructor) != 0);
// TODO(dslomov): implement 'this' hole check elimination.
skip_init_check = false;
} else {
@@ -1591,12 +1524,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
__ li(a1, Operand(var->name()));
__ Push(cp, a1); // Context and name.
Runtime::FunctionId function_id =
- typeof_state == NOT_INSIDE_TYPEOF
+ typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
__ CallRuntime(function_id, 2);
@@ -1993,7 +1926,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression());
+ DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
SetExpressionPosition(expr, INSERT_BREAK);
@@ -2152,8 +2085,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend);
-
__ bind(&continuation);
+ __ RecordGeneratorContinuation();
__ jmp(&resume);
__ bind(&suspend);
@@ -2224,10 +2157,13 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(a0); // result
+
__ jmp(&l_suspend);
__ bind(&l_continuation);
+ __ RecordGeneratorContinuation();
__ mov(a0, v0);
__ jmp(&l_resume);
+
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + try_block_size;
__ lw(a0, MemOperand(sp, generator_object_depth));
@@ -2266,6 +2202,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a0, v0);
__ mov(a1, a0);
__ sw(a1, MemOperand(sp, 2 * kPointerSize));
+ SetCallPosition(expr, 1);
CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
__ CallStub(&stub);
@@ -2279,7 +2216,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
__ li(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // v0=result.done
+ CallLoadIC(NOT_INSIDE_TYPEOF); // v0=result.done
__ mov(a0, v0);
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
@@ -2290,7 +2227,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
__ li(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // v0=result.value
+ CallLoadIC(NOT_INSIDE_TYPEOF); // v0=result.value
context()->DropAndPlug(2, v0); // drop iter and g
break;
}
@@ -2432,7 +2369,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ li(LoadDescriptor::NameRegister(), Operand(key->value()));
__ li(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL, language_mode());
+ CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
}
@@ -2624,11 +2561,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
}
- // prototype
- __ CallRuntime(Runtime::kToFastProperties, 1);
-
- // constructor
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ // Set both the prototype and constructor to have fast properties, and also
+ // freeze them in strong mode.
+ __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
}
@@ -2646,7 +2581,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
FeedbackVectorICSlot slot) {
- DCHECK(expr->IsValidReferenceExpression());
+ DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(prop);
@@ -2739,7 +2674,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorICSlot slot) {
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(), Operand(var->name()));
@@ -2747,6 +2682,27 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
+ } else if (var->IsGlobalSlot()) {
+ // Global var, const, or let.
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(a0));
+ __ mov(StoreGlobalViaContextDescriptor::ValueRegister(), result_register());
+ int const slot = var->index();
+ int const depth = scope()->ContextChainLength(var->scope());
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ Push(a0);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2778,6 +2734,20 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&const_error);
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ } else if (var->is_this() && op == Token::INIT_CONST) {
+ // Initializing assignment to const {this} needs a write barrier.
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label uninitialized_this;
+ MemOperand location = VarOperand(var, a1);
+ __ lw(a3, location);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&uninitialized_this, eq, a3, Operand(at));
+ __ li(a0, Operand(var->name()));
+ __ Push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -3089,7 +3059,7 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
}
// Record source position of the IC call.
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -3127,22 +3097,6 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
- Variable* this_var = super_ref->this_var()->var();
- GetVar(a1, this_var);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- Label uninitialized_this;
- __ Branch(&uninitialized_this, eq, a1, Operand(at));
- __ li(a0, Operand(this_var->name()));
- __ Push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&uninitialized_this);
-
- EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
-}
-
-
// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VariableProxy* callee = expr->expression()->AsVariableProxy();
@@ -3205,25 +3159,25 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- PushCalleeAndWithBaseObject(expr);
+ PushCalleeAndWithBaseObject(expr);
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(a1);
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ push(a1);
+ EmitResolvePossiblyDirectEval(arg_count);
- // Touch up the stack with the resolved function.
- __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ // Touch up the stack with the resolved function.
+ __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -3294,7 +3248,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
@@ -3322,9 +3276,6 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- VariableProxy* new_target_proxy = super_call_ref->new_target_var();
- VisitForStackValue(new_target_proxy);
-
EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
@@ -3337,7 +3288,11 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
+
+ // Load original constructor into t0.
+ VisitForAccumulatorValue(super_call_ref->new_target_var());
+ __ mov(t0, result_register());
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
@@ -3359,11 +3314,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(v0);
}
@@ -3464,7 +3416,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3474,15 +3426,13 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
__ JumpIfSmi(v0, if_false);
- __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+ __ GetObjectType(v0, a1, a1);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ And(at, a1, Operand(1 << Map::kIsUndetectable));
- Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
+ Split(eq, a1, Operand(SIMD128_VALUE_TYPE), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3873,33 +3823,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4055,18 +3978,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- MathPowStub stub(isolate(), MathPowStub::ON_STACK);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -4110,6 +4021,20 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into a0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(a0, result_register());
+
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4239,19 +4164,6 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4296,6 +4208,9 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ CallRuntime(Runtime::kGetPrototype, 1);
__ Push(result_register());
+ // Load original constructor into t0.
+ __ lw(t0, MemOperand(sp, 1 * kPointerSize));
+
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
__ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -4359,60 +4274,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- DCHECK_NOT_NULL(args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort(kAttemptToUseUndefinedCache);
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- context()->Plug(v0);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = v0;
- Register cache = a1;
- __ lw(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
- __ lw(cache,
- ContextOperand(
- cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ lw(cache,
- FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
-
- Label done, not_found;
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ lw(a2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
- // a2 now holds finger offset as a smi.
- __ Addu(a3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // a3 now points to the start of fixed array elements.
- __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize);
- __ addu(a3, a3, at);
- // a3 now points to key of indexed element of cache.
- __ lw(a2, MemOperand(a3));
- __ Branch(&not_found, ne, key, Operand(a2));
-
- __ lw(v0, MemOperand(a3, kPointerSize));
- __ Branch(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCacheRT, 2);
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4691,47 +4552,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
- // Assert: expr == CallRuntime("ReflectConstruct")
- DCHECK_EQ(1, expr->arguments()->length());
- CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
-
- ZoneList<Expression*>* args = call->arguments();
- DCHECK_EQ(3, args->length());
-
- SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Load ReflectConstruct function
- EmitLoadJSRuntimeFunction(call);
-
- // Push the target function under the receiver
- __ lw(at, MemOperand(sp, 0));
- __ push(at);
- __ sw(v0, MemOperand(sp, kPointerSize));
-
- // Push super constructor
- EmitLoadSuperConstructor(super_call_ref);
- __ Push(result_register());
-
- // Push arguments array
- VisitForStackValue(args->at(1));
-
- // Push NewTarget
- DCHECK(args->at(2)->IsVariableProxy());
- VisitForStackValue(args->at(2));
-
- EmitCallJSRuntimeFunction(call);
-
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, v0);
-
- // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
- EmitInitializeThisAfterSuper(super_call_ref);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as the receiver.
Register receiver = LoadDescriptor::ReceiverRegister();
@@ -4743,7 +4563,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ li(LoadDescriptor::NameRegister(), Operand(expr->name()));
__ li(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -4751,7 +4571,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -4821,9 +4641,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ li(a1, Operand(Smi::FromInt(language_mode())));
- __ push(a1);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy,
+ 2);
context()->Plug(v0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4834,9 +4655,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (var->IsUnallocatedOrGlobalSlot()) {
__ lw(a2, GlobalObjectOperand());
__ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(SLOPPY)));
- __ Push(a2, a1, a0);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ Push(a2, a1);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
context()->Plug(v0);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4926,7 +4746,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpression());
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ CountOperation");
@@ -5211,13 +5031,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
} else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(v0, if_false);
- // Check for undetectable objects => false.
__ GetObjectType(v0, v0, a1);
- __ Branch(if_false, ge, a1, Operand(FIRST_NONSTRING_TYPE));
- __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
- __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
- Split(eq, a1, Operand(zero_reg),
- if_true, if_false, fall_through);
+ Split(lt, a1, Operand(FIRST_NONSTRING_TYPE), if_true, if_false,
+ fall_through);
} else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, v0, a1);
@@ -5256,6 +5072,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(check, factory->type##_string())) { \
+ __ JumpIfSmi(v0, if_false); \
+ __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); \
+ __ LoadRoot(at, Heap::k##Type##MapRootIndex); \
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
} else {
if (if_false != fall_through) __ jmp(if_false);
}
@@ -5388,21 +5214,21 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_script_scope() ||
- declaration_scope->is_module_scope()) {
+ Scope* closure_scope = scope()->ClosureScope();
+ if (closure_scope->is_script_scope() ||
+ closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ li(at, Operand(Smi::FromInt(0)));
- } else if (declaration_scope->is_eval_scope()) {
+ } else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
__ lw(at, ContextOperand(cp, Context::CLOSURE_INDEX));
} else {
- DCHECK(declaration_scope->is_function_scope());
+ DCHECK(closure_scope->is_function_scope());
__ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
__ push(at);
diff --git a/deps/v8/src/full-codegen/mips64/OWNERS b/deps/v8/src/full-codegen/mips64/OWNERS
new file mode 100644
index 0000000000..5508ba626f
--- /dev/null
+++ b/deps/v8/src/full-codegen/mips64/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/src/mips64/full-codegen-mips64.cc b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index 569dc51afc..231e8ba384 100644
--- a/deps/v8/src/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
// Note on Mips implementation:
@@ -18,8 +16,8 @@
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/full-codegen.h"
+#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/parser.h"
#include "src/scopes.h"
@@ -122,7 +120,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
@@ -155,7 +153,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
@@ -333,7 +331,7 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiever and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !is_simple_parameter_list()) {
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
@@ -358,17 +356,14 @@ void FullCodeGenerator::Generate() {
} else {
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- DCHECK(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
- VisitVariableDeclaration(function);
- }
VisitDeclarations(scope()->declarations());
}
+
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
+
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
@@ -490,12 +485,6 @@ void FullCodeGenerator::EmitReturnSequence() {
EmitProfilingCounterReset();
__ bind(&ok);
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
-
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
@@ -504,7 +493,6 @@ void FullCodeGenerator::EmitReturnSequence() {
int32_t arg_count = info_->scope()->num_parameters() + 1;
int32_t sp_delta = arg_count * kPointerSize;
SetReturnPosition(function());
- __ RecordJSReturn();
masm_->mov(sp, fp);
int no_frame_start = masm_->pc_offset();
masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
@@ -512,28 +500,10 @@ void FullCodeGenerator::EmitReturnSequence() {
masm_->Jump(ra);
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
-
-#ifdef DEBUG
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- DCHECK(Assembler::kJSReturnSequenceInstructions <=
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
}
}
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
@@ -541,14 +511,6 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
}
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
}
@@ -709,10 +671,6 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
}
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
@@ -906,22 +864,21 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ li(a2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
DCHECK(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ li(a1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (hole_init) {
__ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
- __ Push(cp, a2, a1, a0);
} else {
DCHECK(Smi::FromInt(0) == 0);
__ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value.
- __ Push(cp, a2, a1, a0);
}
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ Push(a2, a0);
+ __ CallRuntime(IsImmutableVariableMode(mode)
+ ? Runtime::kDeclareReadOnlyLookupSlot
+ : Runtime::kDeclareLookupSlot,
+ 2);
break;
}
}
@@ -974,53 +931,22 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ li(a2, Operand(variable->name()));
- __ li(a1, Operand(Smi::FromInt(NONE)));
- __ Push(cp, a2, a1);
+ __ Push(a2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
break;
}
}
}
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
- // The context is the first argument.
__ li(a1, Operand(pairs));
__ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
- __ Push(cp, a1, a0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ Push(a1, a0);
+ __ CallRuntime(Runtime::kDeclareGlobals, 2);
// Return value is ignored.
}
@@ -1154,8 +1080,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ GetObjectType(a0, a1, a1);
__ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
__ bind(&convert);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
__ mov(a0, v0);
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
@@ -1339,12 +1265,6 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
}
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
int offset,
FeedbackVectorICSlot slot) {
@@ -1361,7 +1281,7 @@ void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofState typeof_state,
+ TypeofMode typeof_mode,
Label* slow) {
Register current = cp;
Register next = a1;
@@ -1407,7 +1327,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// All extension objects were empty and it is safe to use a normal global
// load machinery.
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
}
@@ -1442,9 +1362,8 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
+ TypeofMode typeof_mode,
+ Label* slow, Label* done) {
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
@@ -1452,7 +1371,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
// containing the eval.
Variable* var = proxy->var();
if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
+ EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
__ Branch(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
@@ -1477,22 +1396,36 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- // Inside typeof use a regular load, not a contextual load, to avoid
- // a reference error.
- CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+ if (var->IsGlobalSlot()) {
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ int const slot = var->index();
+ int const depth = scope()->ContextChainLength(var->scope());
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ LoadGlobalViaContextStub stub(isolate(), depth);
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+
+ } else {
+ __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
+ }
}
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
@@ -1504,7 +1437,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
context()->Plug(v0);
break;
}
@@ -1512,7 +1445,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
case VariableLocation::CONTEXT: {
- DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (var->binding_needs_init()) {
@@ -1543,8 +1476,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
} else if (var->is_this()) {
- CHECK(info_->function() != nullptr &&
- (info_->function()->kind() & kSubclassConstructor) != 0);
+ CHECK(info_->has_literal() &&
+ (info_->literal()->kind() & kSubclassConstructor) != 0);
// TODO(dslomov): implement 'this' hole check elimination.
skip_init_check = false;
} else {
@@ -1588,12 +1521,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
__ li(a1, Operand(var->name()));
__ Push(cp, a1); // Context and name.
Runtime::FunctionId function_id =
- typeof_state == NOT_INSIDE_TYPEOF
+ typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
__ CallRuntime(function_id, 2);
@@ -1990,7 +1923,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression());
+ DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
SetExpressionPosition(expr, INSERT_BREAK);
@@ -2149,8 +2082,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend);
-
__ bind(&continuation);
+ __ RecordGeneratorContinuation();
__ jmp(&resume);
__ bind(&suspend);
@@ -2220,10 +2153,13 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(a0); // result
+
__ jmp(&l_suspend);
__ bind(&l_continuation);
+ __ RecordGeneratorContinuation();
__ mov(a0, v0);
__ jmp(&l_resume);
+
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + try_block_size;
__ ld(a0, MemOperand(sp, generator_object_depth));
@@ -2261,6 +2197,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a0, v0);
__ mov(a1, a0);
__ sd(a1, MemOperand(sp, 2 * kPointerSize));
+ SetCallPosition(expr, 1);
CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
__ CallStub(&stub);
@@ -2274,7 +2211,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
__ li(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // v0=result.done
+ CallLoadIC(NOT_INSIDE_TYPEOF); // v0=result.done
__ mov(a0, v0);
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
@@ -2285,7 +2222,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
__ li(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // v0=result.value
+ CallLoadIC(NOT_INSIDE_TYPEOF); // v0=result.value
context()->DropAndPlug(2, v0); // drop iter and g
break;
}
@@ -2429,7 +2366,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ li(LoadDescriptor::NameRegister(), Operand(key->value()));
__ li(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL, language_mode());
+ CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
}
@@ -2622,11 +2559,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
}
- // prototype
- __ CallRuntime(Runtime::kToFastProperties, 1);
-
- // constructor
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ // Set both the prototype and constructor to have fast properties, and also
+ // freeze them in strong mode.
+ __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
}
@@ -2644,7 +2579,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
FeedbackVectorICSlot slot) {
- DCHECK(expr->IsValidReferenceExpression());
+ DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(prop);
@@ -2737,7 +2672,7 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorICSlot slot) {
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(), Operand(var->name()));
@@ -2745,6 +2680,27 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
+ } else if (var->IsGlobalSlot()) {
+ // Global var, const, or let.
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(a0));
+ __ mov(StoreGlobalViaContextDescriptor::ValueRegister(), result_register());
+ int const slot = var->index();
+ int const depth = scope()->ContextChainLength(var->scope());
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ Push(a0);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2776,6 +2732,20 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&const_error);
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ } else if (var->is_this() && op == Token::INIT_CONST) {
+ // Initializing assignment to const {this} needs a write barrier.
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label uninitialized_this;
+ MemOperand location = VarOperand(var, a1);
+ __ ld(a3, location);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&uninitialized_this, eq, a3, Operand(at));
+ __ li(a0, Operand(var->name()));
+ __ Push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -3091,7 +3061,7 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
}
// Record source position of the IC call.
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -3128,22 +3098,6 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
- Variable* this_var = super_ref->this_var()->var();
- GetVar(a1, this_var);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- Label uninitialized_this;
- __ Branch(&uninitialized_this, eq, a1, Operand(at));
- __ li(a0, Operand(this_var->name()));
- __ Push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&uninitialized_this);
-
- EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
-}
-
-
// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VariableProxy* callee = expr->expression()->AsVariableProxy();
@@ -3206,25 +3160,25 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- PushCalleeAndWithBaseObject(expr);
+ PushCalleeAndWithBaseObject(expr);
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(a1);
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ push(a1);
+ EmitResolvePossiblyDirectEval(arg_count);
- // Touch up the stack with the resolved function.
- __ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ // Touch up the stack with the resolved function.
+ __ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -3295,7 +3249,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
@@ -3323,9 +3277,6 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- VariableProxy* new_target_proxy = super_call_ref->new_target_var();
- VisitForStackValue(new_target_proxy);
-
EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
@@ -3338,7 +3289,11 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
+
+ // Load original constructor into a4.
+ VisitForAccumulatorValue(super_call_ref->new_target_var());
+ __ mov(a4, result_register());
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
@@ -3360,11 +3315,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(v0);
}
@@ -3465,7 +3417,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3475,15 +3427,13 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
__ JumpIfSmi(v0, if_false);
- __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+ __ GetObjectType(v0, a1, a1);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ And(at, a1, Operand(1 << Map::kIsUndetectable));
- Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
+ Split(eq, a1, Operand(SIMD128_VALUE_TYPE), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3875,33 +3825,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4058,18 +3981,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- MathPowStub stub(isolate(), MathPowStub::ON_STACK);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -4113,6 +4024,20 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into a0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(a0, result_register());
+
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4242,19 +4167,6 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4299,6 +4211,9 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ CallRuntime(Runtime::kGetPrototype, 1);
__ Push(result_register());
+ // Load original constructor into a4.
+ __ ld(a4, MemOperand(sp, 1 * kPointerSize));
+
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
__ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -4362,60 +4277,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- DCHECK_NOT_NULL(args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort(kAttemptToUseUndefinedCache);
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- context()->Plug(v0);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = v0;
- Register cache = a1;
- __ ld(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ld(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
- __ ld(cache,
- ContextOperand(
- cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ ld(cache,
- FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
-
- Label done, not_found;
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ ld(a2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
- // a2 now holds finger offset as a smi.
- __ Daddu(a3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // a3 now points to the start of fixed array elements.
- __ SmiScale(at, a2, kPointerSizeLog2);
- __ daddu(a3, a3, at);
- // a3 now points to key of indexed element of cache.
- __ ld(a2, MemOperand(a3));
- __ Branch(&not_found, ne, key, Operand(a2));
-
- __ ld(v0, MemOperand(a3, kPointerSize));
- __ Branch(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCacheRT, 2);
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4694,47 +4555,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
- // Assert: expr === CallRuntime("ReflectConstruct")
- DCHECK_EQ(1, expr->arguments()->length());
- CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
-
- ZoneList<Expression*>* args = call->arguments();
- DCHECK_EQ(3, args->length());
-
- SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Load ReflectConstruct function
- EmitLoadJSRuntimeFunction(call);
-
- // Push the target function under the receiver.
- __ ld(at, MemOperand(sp, 0));
- __ push(at);
- __ sd(v0, MemOperand(sp, kPointerSize));
-
- // Push super constructor
- EmitLoadSuperConstructor(super_call_ref);
- __ Push(result_register());
-
- // Push arguments array
- VisitForStackValue(args->at(1));
-
- // Push NewTarget
- DCHECK(args->at(2)->IsVariableProxy());
- VisitForStackValue(args->at(2));
-
- EmitCallJSRuntimeFunction(call);
-
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, v0);
-
- // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
- EmitInitializeThisAfterSuper(super_call_ref);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as the receiver.
Register receiver = LoadDescriptor::ReceiverRegister();
@@ -4746,7 +4566,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ li(LoadDescriptor::NameRegister(), Operand(expr->name()));
__ li(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -4754,7 +4574,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -4823,9 +4643,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ li(a1, Operand(Smi::FromInt(language_mode())));
- __ push(a1);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy,
+ 2);
context()->Plug(v0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4836,9 +4657,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (var->IsUnallocatedOrGlobalSlot()) {
__ ld(a2, GlobalObjectOperand());
__ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(SLOPPY)));
- __ Push(a2, a1, a0);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ Push(a2, a1);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
context()->Plug(v0);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4928,7 +4748,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpression());
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ CountOperation");
@@ -5213,13 +5033,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
} else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(v0, if_false);
- // Check for undetectable objects => false.
__ GetObjectType(v0, v0, a1);
- __ Branch(if_false, ge, a1, Operand(FIRST_NONSTRING_TYPE));
- __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
- __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
- Split(eq, a1, Operand(zero_reg),
- if_true, if_false, fall_through);
+ Split(lt, a1, Operand(FIRST_NONSTRING_TYPE), if_true, if_false,
+ fall_through);
} else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, v0, a1);
@@ -5258,6 +5074,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(check, factory->type##_string())) { \
+ __ JumpIfSmi(v0, if_false); \
+ __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); \
+ __ LoadRoot(at, Heap::k##Type##MapRootIndex); \
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
} else {
if (if_false != fall_through) __ jmp(if_false);
}
@@ -5392,21 +5218,21 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_script_scope() ||
- declaration_scope->is_module_scope()) {
+ Scope* closure_scope = scope()->ClosureScope();
+ if (closure_scope->is_script_scope() ||
+ closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ li(at, Operand(Smi::FromInt(0)));
- } else if (declaration_scope->is_eval_scope()) {
+ } else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
__ ld(at, ContextOperand(cp, Context::CLOSURE_INDEX));
} else {
- DCHECK(declaration_scope->is_function_scope());
+ DCHECK(closure_scope->is_function_scope());
__ ld(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
__ push(at);
diff --git a/deps/v8/src/full-codegen/ppc/OWNERS b/deps/v8/src/full-codegen/ppc/OWNERS
new file mode 100644
index 0000000000..eb007cb908
--- /dev/null
+++ b/deps/v8/src/full-codegen/ppc/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/ppc/full-codegen-ppc.cc b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index ec94a242b4..656c375232 100644
--- a/deps/v8/src/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -2,16 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/full-codegen.h"
+#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/parser.h"
#include "src/scopes.h"
@@ -111,7 +109,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
@@ -155,7 +153,7 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
@@ -327,10 +325,10 @@ void FullCodeGenerator::Generate() {
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
+ // The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !is_simple_parameter_list()) {
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
@@ -357,18 +355,14 @@ void FullCodeGenerator::Generate() {
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- DCHECK(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
- VisitVariableDeclaration(function);
- }
VisitDeclarations(scope()->declarations());
}
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
+
{
Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
@@ -417,10 +411,6 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- if (info_->is_debug()) {
- // Detect debug break requests as soon as possible.
- reset_value = FLAG_interrupt_budget >> 4;
- }
__ mov(r5, Operand(profiling_counter_));
__ LoadSmiLiteral(r6, Smi::FromInt(reset_value));
__ StoreP(r6, FieldMemOperand(r5, Cell::kValueOffset), r0);
@@ -491,11 +481,6 @@ void FullCodeGenerator::EmitReturnSequence() {
EmitProfilingCounterReset();
__ bind(&ok);
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- __ bind(&check_exit_codesize);
-#endif
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{
@@ -503,41 +488,14 @@ void FullCodeGenerator::EmitReturnSequence() {
int32_t arg_count = info_->scope()->num_parameters() + 1;
int32_t sp_delta = arg_count * kPointerSize;
SetReturnPosition(function());
- __ RecordJSReturn();
int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
-#if V8_TARGET_ARCH_PPC64
- // With 64bit we may need nop() instructions to ensure we have
- // enough space to SetDebugBreakAtReturn()
- if (is_int16(sp_delta)) {
- if (!FLAG_enable_embedded_constant_pool) masm_->nop();
- masm_->nop();
- }
-#endif
__ blr();
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
-
-#ifdef DEBUG
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- DCHECK(Assembler::kJSReturnSequenceInstructions <=
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
}
}
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
@@ -545,15 +503,6 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
}
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {}
@@ -703,9 +652,6 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
}
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {}
-
-
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
@@ -884,21 +830,21 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(r5, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
DCHECK(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ LoadSmiLiteral(r4, Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (hole_init) {
__ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ Push(cp, r5, r4, r3);
+ __ Push(r5, r3);
} else {
__ LoadSmiLiteral(r3, Smi::FromInt(0)); // Indicates no initial value.
- __ Push(cp, r5, r4, r3);
+ __ Push(r5, r3);
}
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ CallRuntime(IsImmutableVariableMode(mode)
+ ? Runtime::kDeclareReadOnlyLookupSlot
+ : Runtime::kDeclareLookupSlot,
+ 2);
break;
}
}
@@ -946,53 +892,22 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ mov(r5, Operand(variable->name()));
- __ LoadSmiLiteral(r4, Smi::FromInt(NONE));
- __ Push(cp, r5, r4);
+ __ Push(r5);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
break;
}
}
}
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
- // The context is the first argument.
__ mov(r4, Operand(pairs));
__ LoadSmiLiteral(r3, Smi::FromInt(DeclareGlobalsFlags()));
- __ Push(cp, r4, r3);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ Push(r4, r3);
+ __ CallRuntime(Runtime::kDeclareGlobals, 2);
// Return value is ignored.
}
@@ -1129,8 +1044,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
__ bge(&done_convert);
__ bind(&convert);
- __ push(r3);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(r3);
@@ -1318,12 +1233,6 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
}
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
int offset,
FeedbackVectorICSlot slot) {
@@ -1340,7 +1249,7 @@ void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofState typeof_state,
+ TypeofMode typeof_mode,
Label* slow) {
Register current = cp;
Register next = r4;
@@ -1389,7 +1298,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// All extension objects were empty and it is safe to use a normal global
// load machinery.
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
}
@@ -1426,7 +1335,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofState typeof_state,
+ TypeofMode typeof_mode,
Label* slow, Label* done) {
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
@@ -1435,7 +1344,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
// containing the eval.
Variable* var = proxy->var();
if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
+ EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
__ b(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
@@ -1458,22 +1367,35 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- // Inside typeof use a regular load, not a contextual load, to avoid
- // a reference error.
- CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+ if (var->IsGlobalSlot()) {
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ const int slot = var->index();
+ const int depth = scope()->ContextChainLength(var->scope());
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ LoadGlobalViaContextStub stub(isolate(), depth);
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+ } else {
+ __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
+ }
}
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
@@ -1485,7 +1407,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
context()->Plug(r3);
break;
}
@@ -1493,7 +1415,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
case VariableLocation::CONTEXT: {
- DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (var->binding_needs_init()) {
@@ -1524,8 +1446,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
} else if (var->is_this()) {
- CHECK(info_->function() != nullptr &&
- (info_->function()->kind() & kSubclassConstructor) != 0);
+ CHECK(info_->has_literal() &&
+ (info_->literal()->kind() & kSubclassConstructor) != 0);
// TODO(dslomov): implement 'this' hole check elimination.
skip_init_check = false;
} else {
@@ -1567,12 +1489,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
__ mov(r4, Operand(var->name()));
__ Push(cp, r4); // Context and name.
Runtime::FunctionId function_id =
- typeof_state == NOT_INSIDE_TYPEOF
+ typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
__ CallRuntime(function_id, 2);
@@ -1965,7 +1887,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression());
+ DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
SetExpressionPosition(expr, INSERT_BREAK);
@@ -2122,8 +2044,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label suspend, continuation, post_runtime, resume;
__ b(&suspend);
-
__ bind(&continuation);
+ __ RecordGeneratorContinuation();
__ b(&resume);
__ bind(&suspend);
@@ -2196,9 +2118,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(r3); // result
+
__ b(&l_suspend);
__ bind(&l_continuation);
+ __ RecordGeneratorContinuation();
__ b(&l_resume);
+
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + try_block_size;
__ LoadP(r3, MemOperand(sp, generator_object_depth));
@@ -2236,6 +2161,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ mr(r4, r3);
__ StoreP(r4, MemOperand(sp, 2 * kPointerSize));
+ SetCallPosition(expr, 1);
CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
__ CallStub(&stub);
@@ -2249,7 +2175,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
__ mov(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // r0=result.done
+ CallLoadIC(NOT_INSIDE_TYPEOF); // r0=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
__ cmpi(r3, Operand::Zero());
@@ -2260,7 +2186,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
__ mov(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // r3=result.value
+ CallLoadIC(NOT_INSIDE_TYPEOF); // r3=result.value
context()->DropAndPlug(2, r3); // drop iter and g
break;
}
@@ -2421,7 +2347,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
__ mov(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL, language_mode());
+ CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
}
@@ -2645,11 +2571,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
}
- // prototype
- __ CallRuntime(Runtime::kToFastProperties, 1);
-
- // constructor
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ // Set both the prototype and constructor to have fast properties, and also
+ // freeze them in strong mode.
+ __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
}
@@ -2666,7 +2590,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
FeedbackVectorICSlot slot) {
- DCHECK(expr->IsValidReferenceExpression());
+ DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(prop);
@@ -2759,13 +2683,32 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorICSlot slot) {
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
__ LoadP(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
+ } else if (var->IsGlobalSlot()) {
+ // Global var, const, or let.
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ const int slot = var->index();
+ const int depth = scope()->ContextChainLength(var->scope());
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(r3));
+ StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ push(r3);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2797,6 +2740,20 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&const_error);
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ } else if (var->is_this() && op == Token::INIT_CONST) {
+ // Initializing assignment to const {this} needs a write barrier.
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label uninitialized_this;
+ MemOperand location = VarOperand(var, r4);
+ __ LoadP(r6, location);
+ __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
+ __ beq(&uninitialized_this);
+ __ mov(r4, Operand(var->name()));
+ __ push(r4);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -3101,7 +3058,7 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
@@ -3139,22 +3096,6 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
- Variable* this_var = super_ref->this_var()->var();
- GetVar(r4, this_var);
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- Label uninitialized_this;
- __ beq(&uninitialized_this);
- __ mov(r4, Operand(this_var->name()));
- __ push(r4);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&uninitialized_this);
-
- EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
-}
-
-
// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VariableProxy* callee = expr->expression()->AsVariableProxy();
@@ -3236,7 +3177,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ CallStub(&stub);
@@ -3308,7 +3249,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
// Load function and argument count into r4 and r3.
__ mov(r3, Operand(arg_count));
@@ -3336,9 +3277,6 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- VariableProxy* new_target_proxy = super_call_ref->new_target_var();
- VisitForStackValue(new_target_proxy);
-
EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
@@ -3351,7 +3289,11 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
+
+ // Load original constructor into r7.
+ VisitForAccumulatorValue(super_call_ref->new_target_var());
+ __ mr(r7, result_register());
// Load function and argument count into r1 and r0.
__ mov(r3, Operand(arg_count));
@@ -3373,11 +3315,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(r3);
}
@@ -3479,7 +3418,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3493,11 +3432,9 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(r3, if_false);
- __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ lbz(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+ __ CompareObjectType(r3, r4, r4, SIMD128_VALUE_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ne, if_true, if_false, fall_through, cr0);
+ Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3900,33 +3837,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4072,18 +3982,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- MathPowStub stub(isolate(), MathPowStub::ON_STACK);
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -4124,6 +4022,18 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ // Load the argument into r3 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(r3);
+}
+
+
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4238,18 +4148,6 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4295,6 +4193,9 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ mr(r4, result_register());
__ Push(r4);
+ // Load original constructor into r7.
+ __ LoadP(r7, MemOperand(sp, 1 * kPointerSize));
+
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
__ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -4351,56 +4252,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- DCHECK_NOT_NULL(args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort(kAttemptToUseUndefinedCache);
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- context()->Plug(r3);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = r3;
- Register cache = r4;
- __ LoadP(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
- __ LoadP(cache,
- ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ LoadP(cache,
- FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)), r0);
-
- Label done, not_found;
- __ LoadP(r5, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
- // r5 now holds finger offset as a smi.
- __ addi(r6, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // r6 now points to the start of fixed array elements.
- __ SmiToPtrArrayOffset(r5, r5);
- __ LoadPUX(r5, MemOperand(r6, r5));
- // r6 now points to the key of the pair.
- __ cmp(key, r5);
- __ bne(&not_found);
-
- __ LoadP(r3, MemOperand(r6, kPointerSize));
- __ b(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCacheRT, 2);
-
- __ bind(&done);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4415,10 +4266,9 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
__ lwz(r3, FieldMemOperand(r3, String::kHashFieldOffset));
// PPC - assume ip is free
__ mov(ip, Operand(String::kContainsCachedArrayIndexMask));
- __ and_(r0, r3, ip);
- __ cmpi(r0, Operand::Zero());
+ __ and_(r0, r3, ip, SetRC);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
+ Split(eq, if_true, if_false, fall_through, cr0);
context()->Plug(if_true, if_false);
}
@@ -4700,47 +4550,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
- // Assert: expr === CallRuntime("ReflectConstruct")
- DCHECK_EQ(1, expr->arguments()->length());
- CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
-
- ZoneList<Expression*>* args = call->arguments();
- DCHECK_EQ(3, args->length());
-
- SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Load ReflectConstruct function
- EmitLoadJSRuntimeFunction(call);
-
- // Push the target function under the receiver.
- __ LoadP(r0, MemOperand(sp, 0));
- __ push(r0);
- __ StoreP(r3, MemOperand(sp, kPointerSize));
-
- // Push super constructor
- EmitLoadSuperConstructor(super_call_ref);
- __ Push(result_register());
-
- // Push arguments array
- VisitForStackValue(args->at(1));
-
- // Push NewTarget
- DCHECK(args->at(2)->IsVariableProxy());
- VisitForStackValue(args->at(2));
-
- EmitCallJSRuntimeFunction(call);
-
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r3);
-
- // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
- EmitInitializeThisAfterSuper(super_call_ref);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as the receiver.
Register receiver = LoadDescriptor::ReceiverRegister();
@@ -4752,7 +4561,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
__ mov(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -4760,7 +4569,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ CallStub(&stub);
@@ -4830,9 +4639,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ LoadSmiLiteral(r4, Smi::FromInt(language_mode()));
- __ push(r4);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy,
+ 2);
context()->Plug(r3);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4843,9 +4653,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (var->IsUnallocatedOrGlobalSlot()) {
__ LoadP(r5, GlobalObjectOperand());
__ mov(r4, Operand(var->name()));
- __ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY));
- __ Push(r5, r4, r3);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ Push(r5, r4);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
context()->Plug(r3);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4931,7 +4740,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpression());
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ CountOperation");
@@ -5215,13 +5024,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(r3, if_false);
- // Check for undetectable objects => false.
__ CompareObjectType(r3, r3, r4, FIRST_NONSTRING_TYPE);
- __ bge(if_false);
- __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
- STATIC_ASSERT((1 << Map::kIsUndetectable) < 0x8000);
- __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through, cr0);
+ Split(lt, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(r3, if_false);
__ CompareObjectType(r3, r3, r4, SYMBOL_TYPE);
@@ -5261,6 +5065,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
__ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
Split(eq, if_true, if_false, fall_through, cr0);
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(check, factory->type##_string())) { \
+ __ JumpIfSmi(r3, if_false); \
+ __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); \
+ __ CompareRoot(r3, Heap::k##Type##MapRootIndex); \
+ Split(eq, if_true, if_false, fall_through);
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
} else {
if (if_false != fall_through) __ b(if_false);
}
@@ -5393,21 +5207,21 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_script_scope() ||
- declaration_scope->is_module_scope()) {
+ Scope* closure_scope = scope()->ClosureScope();
+ if (closure_scope->is_script_scope() ||
+ closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ LoadSmiLiteral(ip, Smi::FromInt(0));
- } else if (declaration_scope->is_eval_scope()) {
+ } else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
__ LoadP(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
} else {
- DCHECK(declaration_scope->is_function_scope());
+ DCHECK(closure_scope->is_function_scope());
__ LoadP(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
__ push(ip);
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
index ef8c15087f..7e0553103f 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -2,16 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/full-codegen.h"
+#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/parser.h"
#include "src/scopes.h"
@@ -102,7 +100,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
@@ -140,7 +138,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
@@ -319,7 +317,7 @@ void FullCodeGenerator::Generate() {
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !is_simple_parameter_list()) {
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
@@ -345,18 +343,14 @@ void FullCodeGenerator::Generate() {
} else {
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- DCHECK(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
- VisitVariableDeclaration(function);
- }
VisitDeclarations(scope()->declarations());
}
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
+
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
@@ -467,52 +461,20 @@ void FullCodeGenerator::EmitReturnSequence() {
__ Pop(rax);
EmitProfilingCounterReset();
__ bind(&ok);
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
+
SetReturnPosition(function());
- __ RecordJSReturn();
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ movp(rsp, rbp);
- __ popq(rbp);
int no_frame_start = masm_->pc_offset();
+ __ leave();
int arg_count = info_->scope()->num_parameters() + 1;
int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, rcx);
- // Add padding that will be overwritten by a debugger breakpoint. We
- // have just generated at least 7 bytes: "movp rsp, rbp; pop rbp; ret k"
- // (3 + 1 + 3) for x64 and at least 6 (2 + 1 + 3) bytes for x32.
- const int kPadding = Assembler::kJSReturnSequenceLength -
- kPointerSize == kInt64Size ? 7 : 6;
- for (int i = 0; i < kPadding; ++i) {
- masm_->int3();
- }
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- DCHECK(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand operand = codegen()->VarOperand(var, result_register());
@@ -520,13 +482,6 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
}
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
}
@@ -689,10 +644,6 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
}
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
@@ -875,13 +826,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
- __ Push(rsi);
__ Push(variable->name());
// Declaration nodes are always introduced in one of four modes.
DCHECK(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ Push(Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
@@ -891,7 +838,10 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ Push(Smi::FromInt(0)); // Indicates no initial value.
}
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ CallRuntime(IsImmutableVariableMode(mode)
+ ? Runtime::kDeclareReadOnlyLookupSlot
+ : Runtime::kDeclareLookupSlot,
+ 2);
break;
}
}
@@ -942,52 +892,20 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
- __ Push(rsi);
__ Push(variable->name());
- __ Push(Smi::FromInt(NONE));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
break;
}
}
}
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::UNALLOCATED:
- case VariableLocation::GLOBAL:
- // TODO(rossberg)
- break;
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
- __ Push(rsi); // The context is the first argument.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kDeclareGlobals, 2);
// Return value is ignored.
}
@@ -1119,12 +1037,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
- __ JumpIfSmi(rax, &convert);
+ __ JumpIfSmi(rax, &convert, Label::kNear);
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &done_convert);
+ __ j(above_equal, &done_convert, Label::kNear);
__ bind(&convert);
- __ Push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ Push(rax);
@@ -1311,12 +1229,6 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
}
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
int offset,
FeedbackVectorICSlot slot) {
@@ -1333,7 +1245,7 @@ void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofState typeof_state,
+ TypeofMode typeof_mode,
Label* slow) {
Register context = rsi;
Register temp = rdx;
@@ -1383,7 +1295,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// All extension objects were empty and it is safe to use a normal global
// load machinery.
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
}
@@ -1418,9 +1330,8 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
+ TypeofMode typeof_mode,
+ Label* slow, Label* done) {
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
@@ -1428,7 +1339,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
// containing the eval.
Variable* var = proxy->var();
if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
+ EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
__ jmp(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
@@ -1450,22 +1361,36 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ Move(LoadDescriptor::NameRegister(), var->name());
- __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ Move(LoadDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- // Inside typeof use a regular load, not a contextual load, to avoid
- // a reference error.
- CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+ if (var->IsGlobalSlot()) {
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ int const slot = var->index();
+ int const depth = scope()->ContextChainLength(var->scope());
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ Set(LoadGlobalViaContextDescriptor::SlotRegister(), slot);
+ LoadGlobalViaContextStub stub(isolate(), depth);
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+
+ } else {
+ __ Move(LoadDescriptor::NameRegister(), var->name());
+ __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ Move(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
+ CallLoadIC(typeof_mode);
+ }
}
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
@@ -1477,7 +1402,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
context()->Plug(rax);
break;
}
@@ -1485,7 +1410,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
case VariableLocation::CONTEXT: {
- DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context slot"
: "[ Stack slot");
if (var->binding_needs_init()) {
@@ -1516,8 +1441,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
} else if (var->is_this()) {
- CHECK(info_->function() != nullptr &&
- (info_->function()->kind() & kSubclassConstructor) != 0);
+ CHECK(info_->has_literal() &&
+ (info_->literal()->kind() & kSubclassConstructor) != 0);
// TODO(dslomov): implement 'this' hole check elimination.
skip_init_check = false;
} else {
@@ -1558,12 +1483,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
__ Push(rsi); // Context.
__ Push(var->name());
Runtime::FunctionId function_id =
- typeof_state == NOT_INSIDE_TYPEOF
+ typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
__ CallRuntime(function_id, 2);
@@ -1960,7 +1885,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression());
+ DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
SetExpressionPosition(expr, INSERT_BREAK);
@@ -2113,8 +2038,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend);
-
__ bind(&continuation);
+ __ RecordGeneratorContinuation();
__ jmp(&resume);
__ bind(&suspend);
@@ -2188,9 +2113,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ Push(rax); // result
+
__ jmp(&l_suspend);
__ bind(&l_continuation);
+ __ RecordGeneratorContinuation();
__ jmp(&l_resume);
+
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + try_block_size;
__ movp(rax, Operand(rsp, generator_object_depth));
@@ -2228,6 +2156,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ movp(rdi, rax);
__ movp(Operand(rsp, 2 * kPointerSize), rdi);
+
+ SetCallPosition(expr, 1);
CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
__ CallStub(&stub);
@@ -2241,7 +2171,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
__ Move(LoadDescriptor::SlotRegister(),
SmiFromSlot(expr->DoneFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL); // rax=result.done
+ CallLoadIC(NOT_INSIDE_TYPEOF); // rax=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
__ testp(result_register(), result_register());
@@ -2252,7 +2182,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
__ Move(LoadDescriptor::SlotRegister(),
SmiFromSlot(expr->ValueFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL); // result.value in rax
+ CallLoadIC(NOT_INSIDE_TYPEOF); // result.value in rax
context()->DropAndPlug(2, rax); // drop iter and g
break;
}
@@ -2392,7 +2322,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ Move(LoadDescriptor::NameRegister(), key->value());
__ Move(LoadDescriptor::SlotRegister(),
SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL, language_mode());
+ CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
}
@@ -2547,11 +2477,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
}
- // prototype
- __ CallRuntime(Runtime::kToFastProperties, 1);
-
- // constructor
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ // Set both the prototype and constructor to have fast properties, and also
+ // freeze them in strong mode.
+ __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
}
@@ -2568,7 +2496,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
FeedbackVectorICSlot slot) {
- DCHECK(expr->IsValidReferenceExpression());
+ DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(prop);
@@ -2659,13 +2587,33 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorICSlot slot) {
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(StoreDescriptor::NameRegister(), var->name());
__ movp(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
+ } else if (var->IsGlobalSlot()) {
+ // Global var, const, or let.
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ int const slot = var->index();
+ int const depth = scope()->ContextChainLength(var->scope());
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ Set(StoreGlobalViaContextDescriptor::SlotRegister(), slot);
+ DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(rax));
+ StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ Push(rax);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2694,6 +2642,19 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&const_error);
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ } else if (var->is_this() && op == Token::INIT_CONST) {
+ // Initializing assignment to const {this} needs a write barrier.
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label uninitialized_this;
+ MemOperand location = VarOperand(var, rcx);
+ __ movp(rdx, location);
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &uninitialized_this);
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -2996,7 +2957,7 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
@@ -3035,21 +2996,6 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperCallReference* super_ref, FeedbackVectorICSlot slot) {
- Variable* this_var = super_ref->this_var()->var();
- GetVar(rcx, this_var);
- __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
- Label uninitialized_this;
- __ j(equal, &uninitialized_this);
- __ Push(this_var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&uninitialized_this);
-
- EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
-}
-
-
// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VariableProxy* callee = expr->expression()->AsVariableProxy();
@@ -3108,24 +3054,24 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- PushCalleeAndWithBaseObject(expr);
+ PushCalleeAndWithBaseObject(expr);
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Push a copy of the function (found below the arguments) and resolve
+ // eval.
+ __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
+ EmitResolvePossiblyDirectEval(arg_count);
- // Push a copy of the function (found below the arguments) and resolve
- // eval.
- __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
+ // Touch up the callee.
+ __ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
- // Touch up the callee.
- __ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
- // Record source position for debugger.
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -3196,7 +3142,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
// Load function and argument count into rdi and rax.
__ Set(rax, arg_count);
@@ -3224,9 +3170,6 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- VariableProxy* new_target_proxy = super_call_ref->new_target_var();
- VisitForStackValue(new_target_proxy);
-
EmitLoadSuperConstructor(super_call_ref);
__ Push(result_register());
@@ -3239,9 +3182,13 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
- // Load function and argument count into edi and eax.
+ // Load original constructor into rcx.
+ VisitForAccumulatorValue(super_call_ref->new_target_var());
+ __ movp(rcx, result_register());
+
+ // Load function and argument count into rdi and rax.
__ Set(rax, arg_count);
__ movp(rdi, Operand(rsp, arg_count * kPointerSize));
@@ -3261,11 +3208,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
__ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(rax);
}
@@ -3366,7 +3310,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3376,15 +3320,13 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
- __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
+ __ CmpObjectType(rax, SIMD128_VALUE_TYPE, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(not_zero, if_true, if_false, fall_through);
+ Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3770,33 +3712,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3949,18 +3864,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- MathPowStub stub(isolate(), MathPowStub::ON_STACK);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -4002,6 +3905,19 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into rax and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4128,19 +4044,6 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4185,11 +4088,14 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ CallRuntime(Runtime::kGetPrototype, 1);
__ Push(result_register());
+ // Load original constructor into rcx.
+ __ movp(rcx, Operand(rsp, 1 * kPointerSize));
+
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
__ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ movp(rbx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rbx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// default constructor has no arguments, so no adaptor frame means no args.
__ movp(rax, Immediate(0));
@@ -4198,17 +4104,17 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
// Copy arguments from adaptor frame.
{
__ bind(&adaptor_frame);
- __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger64(rcx, rcx);
+ __ movp(rbx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToInteger64(rbx, rbx);
- __ movp(rax, rcx);
- __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ movp(rax, rbx);
+ __ leap(rdx, Operand(rdx, rbx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
Label loop;
__ bind(&loop);
__ Push(Operand(rdx, -1 * kPointerSize));
__ subp(rdx, Immediate(kPointerSize));
- __ decp(rcx);
+ __ decp(rbx);
__ j(not_zero, &loop);
}
@@ -4239,63 +4145,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- DCHECK_NOT_NULL(args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort(kAttemptToUseUndefinedCache);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- context()->Plug(rax);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = rax;
- Register cache = rbx;
- Register tmp = rcx;
- __ movp(cache, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movp(cache,
- FieldOperand(cache, GlobalObject::kNativeContextOffset));
- __ movp(cache,
- ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ movp(cache,
- FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- Label done, not_found;
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movp(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
- // tmp now holds finger offset as a smi.
- SmiIndex index =
- __ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
- __ cmpp(key, FieldOperand(cache,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ j(not_equal, &not_found, Label::kNear);
- __ movp(rax, FieldOperand(cache,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize + kPointerSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ Push(cache);
- __ Push(key);
- __ CallRuntime(Runtime::kGetFromCacheRT, 2);
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4630,46 +4479,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
- // Assert: expr === CallRuntime("ReflectConstruct")
- DCHECK_EQ(1, expr->arguments()->length());
- CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
-
- ZoneList<Expression*>* args = call->arguments();
- DCHECK_EQ(3, args->length());
-
- SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Load ReflectConstruct function
- EmitLoadJSRuntimeFunction(call);
-
- // Push the target function under the receiver.
- __ Push(Operand(rsp, 0));
- __ movp(Operand(rsp, kPointerSize), rax);
-
- // Push super constructor
- EmitLoadSuperConstructor(super_call_ref);
- __ Push(result_register());
-
- // Push arguments array
- VisitForStackValue(args->at(1));
-
- // Push NewTarget
- DCHECK(args->at(2)->IsVariableProxy());
- VisitForStackValue(args->at(2));
-
- EmitCallJSRuntimeFunction(call);
-
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, rax);
-
- // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
- EmitInitializeThisAfterSuper(super_call_ref);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as receiver.
__ movp(rax, GlobalObjectOperand());
@@ -4680,7 +4489,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ Move(LoadDescriptor::NameRegister(), expr->name());
__ Move(LoadDescriptor::SlotRegister(),
SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
- CallLoadIC(NOT_CONTEXTUAL);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -4688,7 +4497,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -4757,8 +4566,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ Push(Smi::FromInt(language_mode()));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy,
+ 2);
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4769,8 +4580,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (var->IsUnallocatedOrGlobalSlot()) {
__ Push(GlobalObjectOperand());
__ Push(var->name());
- __ Push(Smi::FromInt(SLOPPY));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
context()->Plug(rax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global variables is false. 'this' is
@@ -4866,7 +4676,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpression());
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ CountOperation");
@@ -4972,13 +4782,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- SmiOperationExecutionMode mode;
- mode.Add(PRESERVE_SOURCE_REGISTER);
- mode.Add(BAILOUT_ON_NO_OVERFLOW);
+ SmiOperationConstraints constraints =
+ SmiOperationConstraint::kPreserveSourceRegister |
+ SmiOperationConstraint::kBailoutOnNoOverflow;
if (expr->op() == Token::INC) {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1), mode, &done, Label::kNear);
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1), constraints, &done,
+ Label::kNear);
} else {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1), mode, &done, Label::kNear);
+ __ SmiSubConstant(rax, rax, Smi::FromInt(1), constraints, &done,
+ Label::kNear);
}
__ jmp(&stub_call, Label::kNear);
__ bind(&slow);
@@ -5144,12 +4956,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(equal, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(rax, if_false);
- // Check for undetectable objects => false.
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
- __ j(above_equal, if_false);
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(zero, if_true, if_false, fall_through);
+ Split(below, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, SYMBOL_TYPE, rdx);
@@ -5187,6 +4995,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(zero, if_true, if_false, fall_through);
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(check, factory->type##_string())) { \
+ __ JumpIfSmi(rax, if_false); \
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset)); \
+ __ CompareRoot(rax, Heap::k##Type##MapRootIndex); \
+ Split(equal, if_true, if_false, fall_through);
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
} else {
if (if_false != fall_through) __ jmp(if_false);
}
@@ -5323,21 +5141,21 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_script_scope() ||
- declaration_scope->is_module_scope()) {
+ Scope* closure_scope = scope()->ClosureScope();
+ if (closure_scope->is_script_scope() ||
+ closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ Push(Smi::FromInt(0));
- } else if (declaration_scope->is_eval_scope()) {
+ } else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
__ Push(ContextOperand(rsi, Context::CLOSURE_INDEX));
} else {
- DCHECK(declaration_scope->is_function_scope());
+ DCHECK(closure_scope->is_function_scope());
__ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
}
diff --git a/deps/v8/src/full-codegen/x87/OWNERS b/deps/v8/src/full-codegen/x87/OWNERS
new file mode 100644
index 0000000000..dd9998b261
--- /dev/null
+++ b/deps/v8/src/full-codegen/x87/OWNERS
@@ -0,0 +1 @@
+weiliang.lin@intel.com
diff --git a/deps/v8/src/x87/full-codegen-x87.cc b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
index 7b1a35dcd7..cfa752c191 100644
--- a/deps/v8/src/x87/full-codegen-x87.cc
+++ b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
@@ -2,19 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/full-codegen.h"
+#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/parser.h"
#include "src/scopes.h"
+#include "src/x87/frames-x87.h"
namespace v8 {
namespace internal {
@@ -102,7 +101,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
@@ -140,7 +139,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->function()->kind()) || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(function()->kind()) || locals_count == 0);
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -317,7 +316,7 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !is_simple_parameter_list()) {
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
@@ -344,18 +343,14 @@ void FullCodeGenerator::Generate() {
} else {
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- DCHECK(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_LEGACY);
- DCHECK(!function->proxy()->var()->IsUnallocatedOrGlobalSlot());
- VisitVariableDeclaration(function);
- }
VisitDeclarations(scope()->declarations());
}
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
+
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
@@ -461,42 +456,19 @@ void FullCodeGenerator::EmitReturnSequence() {
__ pop(eax);
EmitProfilingCounterReset();
__ bind(&ok);
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
+
SetReturnPosition(function());
- __ RecordJSReturn();
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ mov(esp, ebp);
int no_frame_start = masm_->pc_offset();
- __ pop(ebp);
+ __ leave();
int arg_count = info_->scope()->num_parameters() + 1;
int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, ecx);
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- DCHECK(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand operand = codegen()->VarOperand(var, result_register());
@@ -505,14 +477,6 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
}
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
UNREACHABLE(); // Not used on X87.
}
@@ -663,10 +627,6 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
}
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
Handle<Object> value = flag
? isolate()->factory()->true_value()
@@ -850,13 +810,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
- __ push(esi);
__ push(Immediate(variable->name()));
// VariableDeclaration nodes are always introduced in one of four modes.
DCHECK(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
@@ -866,7 +822,10 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
}
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+ __ CallRuntime(IsImmutableVariableMode(mode)
+ ? Runtime::kDeclareReadOnlyLookupSlot
+ : Runtime::kDeclareLookupSlot,
+ 2);
break;
}
}
@@ -912,52 +871,20 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(esi);
__ push(Immediate(variable->name()));
- __ push(Immediate(Smi::FromInt(NONE)));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
+ __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
break;
}
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::LOOKUP:
- UNREACHABLE();
}
}
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
- __ push(esi); // The context is the first argument.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kDeclareGlobals, 2);
// Return value is ignored.
}
@@ -1090,8 +1017,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &done_convert, Label::kNear);
__ bind(&convert);
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(eax);
@@ -1268,12 +1195,6 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
}
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
int offset,
FeedbackVectorICSlot slot) {
@@ -1290,7 +1211,7 @@ void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofState typeof_state,
+ TypeofMode typeof_mode,
Label* slow) {
Register context = esi;
Register temp = edx;
@@ -1339,7 +1260,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// All extension objects were empty and it is safe to use a normal global
// load machinery.
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
}
@@ -1374,9 +1295,8 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
+ TypeofMode typeof_mode,
+ Label* slow, Label* done) {
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
@@ -1384,7 +1304,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
// containing the eval.
Variable* var = proxy->var();
if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
+ EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
__ jmp(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
@@ -1406,22 +1326,36 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), var->name());
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- // Inside typeof use a regular load, not a contextual load, to avoid
- // a reference error.
- CallLoadIC(typeof_state == NOT_INSIDE_TYPEOF ? CONTEXTUAL : NOT_CONTEXTUAL);
+ if (var->IsGlobalSlot()) {
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ int const slot = var->index();
+ int const depth = scope()->ContextChainLength(var->scope());
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ Move(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
+ LoadGlobalViaContextStub stub(isolate(), depth);
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+
+ } else {
+ __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::NameRegister(), var->name());
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
+ }
}
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
- TypeofState typeof_state) {
+ TypeofMode typeof_mode) {
SetExpressionPosition(proxy);
PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
Variable* var = proxy->var();
@@ -1432,7 +1366,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- EmitGlobalVariableLoad(proxy, typeof_state);
+ EmitGlobalVariableLoad(proxy, typeof_mode);
context()->Plug(eax);
break;
}
@@ -1440,7 +1374,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
case VariableLocation::CONTEXT: {
- DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_state);
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (var->binding_needs_init()) {
@@ -1471,8 +1405,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
} else if (var->is_this()) {
- CHECK(info_->function() != nullptr &&
- (info_->function()->kind() & kSubclassConstructor) != 0);
+ CHECK(info_->has_literal() &&
+ (info_->literal()->kind() & kSubclassConstructor) != 0);
// TODO(dslomov): implement 'this' hole check elimination.
skip_init_check = false;
} else {
@@ -1513,12 +1447,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_state, &slow, &done);
+ EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
__ push(esi); // Context.
__ push(Immediate(var->name()));
Runtime::FunctionId function_id =
- typeof_state == NOT_INSIDE_TYPEOF
+ typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
__ CallRuntime(function_id, 2);
@@ -1917,7 +1851,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression());
+ DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
SetExpressionPosition(expr, INSERT_BREAK);
@@ -2071,8 +2005,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend);
-
__ bind(&continuation);
+ __ RecordGeneratorContinuation();
__ jmp(&resume);
__ bind(&suspend);
@@ -2145,9 +2079,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
EnterTryBlock(handler_index, &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(eax); // result
+
__ jmp(&l_suspend);
__ bind(&l_continuation);
+ __ RecordGeneratorContinuation();
__ jmp(&l_resume);
+
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + try_block_size;
__ mov(eax, Operand(esp, generator_object_depth));
@@ -2185,6 +2122,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
+ SetCallPosition(expr, 1);
CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
__ CallStub(&stub);
@@ -2199,7 +2137,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
isolate()->factory()->done_string()); // "done"
__ mov(LoadDescriptor::SlotRegister(),
Immediate(SmiFromSlot(expr->DoneFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // result.done in eax
+ CallLoadIC(NOT_INSIDE_TYPEOF); // result.done in eax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
__ test(eax, eax);
@@ -2211,7 +2149,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
isolate()->factory()->value_string()); // "value"
__ mov(LoadDescriptor::SlotRegister(),
Immediate(SmiFromSlot(expr->ValueFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL); // result.value in eax
+ CallLoadIC(NOT_INSIDE_TYPEOF); // result.value in eax
context()->DropAndPlug(2, eax); // drop iter and g
break;
}
@@ -2350,7 +2288,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
__ mov(LoadDescriptor::SlotRegister(),
Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL, language_mode());
+ CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
}
@@ -2536,11 +2474,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
}
- // prototype
- __ CallRuntime(Runtime::kToFastProperties, 1);
-
- // constructor
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ // Set both the prototype and constructor to have fast properties, and also
+ // freeze them in strong mode.
+ __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
}
@@ -2557,7 +2493,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
FeedbackVectorICSlot slot) {
- DCHECK(expr->IsValidReferenceExpression());
+ DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(prop);
@@ -2648,13 +2584,33 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorICSlot slot) {
- if (var->IsUnallocatedOrGlobalSlot()) {
+ if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), var->name());
__ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
CallStoreIC();
+ } else if (var->IsGlobalSlot()) {
+ // Global var, const, or let.
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ int const slot = var->index();
+ int const depth = scope()->ContextChainLength(var->scope());
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ Move(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
+ DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(eax));
+ StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
+ __ CallStub(&stub);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ Push(eax);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2683,6 +2639,19 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&const_error);
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ } else if (var->is_this() && op == Token::INIT_CONST) {
+ // Initializing assignment to const {this} needs a write barrier.
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label uninitialized_this;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(equal, &uninitialized_this);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -2986,7 +2955,7 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
@@ -3025,22 +2994,6 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitInitializeThisAfterSuper(
- SuperCallReference* super_call_ref, FeedbackVectorICSlot slot) {
- Variable* this_var = super_call_ref->this_var()->var();
- GetVar(ecx, this_var);
- __ cmp(ecx, isolate()->factory()->the_hole_value());
-
- Label uninitialized_this;
- __ j(equal, &uninitialized_this);
- __ push(Immediate(this_var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&uninitialized_this);
-
- EmitVariableAssignment(this_var, Token::INIT_CONST, slot);
-}
-
-
// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VariableProxy* callee = expr->expression()->AsVariableProxy();
@@ -3117,7 +3070,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -3188,7 +3141,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
// Load function and argument count into edi and eax.
__ Move(eax, Immediate(arg_count));
@@ -3216,9 +3169,6 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- VariableProxy* new_target_proxy = super_call_ref->new_target_var();
- VisitForStackValue(new_target_proxy);
-
EmitLoadSuperConstructor(super_call_ref);
__ push(result_register());
@@ -3231,7 +3181,11 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Call the construct call builtin that handles allocation and
// constructor invocation.
- SetExpressionPosition(expr);
+ SetConstructCallPosition(expr);
+
+ // Load original constructor into ecx.
+ VisitForAccumulatorValue(super_call_ref->new_target_var());
+ __ mov(ecx, result_register());
// Load function and argument count into edi and eax.
__ Move(eax, Immediate(arg_count));
@@ -3253,11 +3207,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
__ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
RecordJSReturnSite(expr);
- EmitInitializeThisAfterSuper(super_call_ref, expr->CallFeedbackICSlot());
context()->Plug(eax);
}
@@ -3358,7 +3309,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3368,15 +3319,13 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsUndetectable));
+ __ CmpObjectType(eax, SIMD128_VALUE_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(not_zero, if_true, if_false, fall_through);
+ Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3768,33 +3717,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3942,18 +3864,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- __ CallRuntime(Runtime::kMathPowSlow, 2);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -3996,6 +3906,19 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into eax and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4122,19 +4045,6 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4179,11 +4089,14 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ CallRuntime(Runtime::kGetPrototype, 1);
__ push(result_register());
+ // Load original constructor into ecx.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ mov(ebx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame);
// default constructor has no arguments, so no adaptor frame means no args.
__ mov(eax, Immediate(0));
@@ -4192,17 +4105,17 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
// Copy arguments from adaptor frame.
{
__ bind(&adaptor_frame);
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(ecx);
+ __ mov(ebx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(ebx);
- __ mov(eax, ecx);
- __ lea(edx, Operand(edx, ecx, times_pointer_size,
+ __ mov(eax, ebx);
+ __ lea(edx, Operand(edx, ebx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
Label loop;
__ bind(&loop);
__ push(Operand(edx, -1 * kPointerSize));
__ sub(edx, Immediate(kPointerSize));
- __ dec(ecx);
+ __ dec(ebx);
__ j(not_zero, &loop);
}
@@ -4234,55 +4147,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- DCHECK_NOT_NULL(args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort(kAttemptToUseUndefinedCache);
- __ mov(eax, isolate()->factory()->undefined_value());
- context()->Plug(eax);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = eax;
- Register cache = ebx;
- Register tmp = ecx;
- __ mov(cache, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
- __ mov(cache,
- FieldOperand(cache, GlobalObject::kNativeContextOffset));
- __ mov(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ mov(cache,
- FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- Label done, not_found;
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
- // tmp now holds finger offset as a smi.
- __ cmp(key, FixedArrayElementOperand(cache, tmp));
- __ j(not_equal, &not_found);
-
- __ mov(eax, FixedArrayElementOperand(cache, tmp, 1));
- __ jmp(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ push(cache);
- __ push(key);
- __ CallRuntime(Runtime::kGetFromCacheRT, 2);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4595,46 +4459,6 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
- // Assert: expr == CallRuntime("ReflectConstruct")
- DCHECK_EQ(1, expr->arguments()->length());
- CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
-
- ZoneList<Expression*>* args = call->arguments();
- DCHECK_EQ(3, args->length());
-
- SuperCallReference* super_call_ref = args->at(0)->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Load ReflectConstruct function
- EmitLoadJSRuntimeFunction(call);
-
- // Push the target function under the receiver
- __ push(Operand(esp, 0));
- __ mov(Operand(esp, kPointerSize), eax);
-
- // Push super constructor
- EmitLoadSuperConstructor(super_call_ref);
- __ Push(result_register());
-
- // Push arguments array
- VisitForStackValue(args->at(1));
-
- // Push NewTarget
- DCHECK(args->at(2)->IsVariableProxy());
- VisitForStackValue(args->at(2));
-
- EmitCallJSRuntimeFunction(call);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
-
- // TODO(mvstanton): with FLAG_vector_stores this needs a slot id.
- EmitInitializeThisAfterSuper(super_call_ref);
-}
-
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as receiver.
__ mov(eax, GlobalObjectOperand());
@@ -4645,7 +4469,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ mov(LoadDescriptor::NameRegister(), Immediate(expr->name()));
__ mov(LoadDescriptor::SlotRegister(),
Immediate(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_CONTEXTUAL);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -4653,7 +4477,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetExpressionPosition(expr);
+ SetCallPosition(expr, arg_count);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@@ -4721,8 +4545,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy,
+ 2);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4733,8 +4559,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (var->IsUnallocatedOrGlobalSlot()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(SLOPPY)));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
context()->Plug(eax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global variables is false. 'this' is
@@ -4830,7 +4655,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpression());
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ CountOperation");
@@ -5114,11 +4939,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (String::Equals(check, factory->string_string())) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
- __ j(above_equal, if_false);
- // Check for undetectable objects => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- Split(zero, if_true, if_false, fall_through);
+ Split(below, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->symbol_string())) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, SYMBOL_TYPE, edx);
@@ -5156,6 +4977,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
Split(zero, if_true, if_false, fall_through);
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(check, factory->type##_string())) { \
+ __ JumpIfSmi(eax, if_false); \
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset), \
+ isolate()->factory()->type##_map()); \
+ Split(equal, if_true, if_false, fall_through);
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
} else {
if (if_false != fall_through) __ jmp(if_false);
}
@@ -5293,21 +5124,21 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_script_scope() ||
- declaration_scope->is_module_scope()) {
+ Scope* closure_scope = scope()->ClosureScope();
+ if (closure_scope->is_script_scope() ||
+ closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
__ push(Immediate(Smi::FromInt(0)));
- } else if (declaration_scope->is_eval_scope()) {
+ } else if (closure_scope->is_eval_scope()) {
// Contexts nested inside eval code have the same closure as the context
// calling eval, not the anonymous closure containing the eval code.
// Fetch it from the context.
__ push(ContextOperand(esi, Context::CLOSURE_INDEX));
} else {
- DCHECK(declaration_scope->is_function_scope());
+ DCHECK(closure_scope->is_function_scope());
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
}
diff --git a/deps/v8/src/func-name-inferrer.cc b/deps/v8/src/func-name-inferrer.cc
index 9415b8985d..5006c03eb6 100644
--- a/deps/v8/src/func-name-inferrer.cc
+++ b/deps/v8/src/func-name-inferrer.cc
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/func-name-inferrer.h"
#include "src/ast.h"
#include "src/ast-value-factory.h"
-#include "src/func-name-inferrer.h"
#include "src/list-inl.h"
namespace v8 {
diff --git a/deps/v8/src/futex-emulation.cc b/deps/v8/src/futex-emulation.cc
new file mode 100644
index 0000000000..5a0ce07f1a
--- /dev/null
+++ b/deps/v8/src/futex-emulation.cc
@@ -0,0 +1,231 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/futex-emulation.h"
+
+#include <limits>
+
+#include "src/base/macros.h"
+#include "src/base/platform/time.h"
+#include "src/conversions.h"
+#include "src/handles-inl.h"
+#include "src/isolate.h"
+#include "src/list-inl.h"
+
+namespace v8 {
+namespace internal {
+
+base::LazyMutex FutexEmulation::mutex_ = LAZY_MUTEX_INITIALIZER;
+base::LazyInstance<FutexWaitList>::type FutexEmulation::wait_list_ =
+ LAZY_INSTANCE_INITIALIZER;
+
+
+FutexWaitList::FutexWaitList() : head_(nullptr), tail_(nullptr) {}
+
+
+void FutexWaitList::AddNode(FutexWaitListNode* node) {
+ DCHECK(node->prev_ == nullptr && node->next_ == nullptr);
+ if (tail_) {
+ tail_->next_ = node;
+ } else {
+ head_ = node;
+ }
+
+ node->prev_ = tail_;
+ node->next_ = nullptr;
+ tail_ = node;
+}
+
+
+void FutexWaitList::RemoveNode(FutexWaitListNode* node) {
+ if (node->prev_) {
+ node->prev_->next_ = node->next_;
+ } else {
+ head_ = node->next_;
+ }
+
+ if (node->next_) {
+ node->next_->prev_ = node->prev_;
+ } else {
+ tail_ = node->prev_;
+ }
+
+ node->prev_ = node->next_ = nullptr;
+}
+
+
+Object* FutexEmulation::Wait(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer, size_t addr,
+ int32_t value, double rel_timeout_ms) {
+ // We never want to wait longer than this amount of time; this way we can
+ // interrupt this thread even if this is an "infinitely blocking" wait.
+ // TODO(binji): come up with a better way of interrupting only when
+ // necessary, rather than busy-waiting.
+ const base::TimeDelta kMaxWaitTime = base::TimeDelta::FromMilliseconds(50);
+
+ DCHECK(addr < NumberToSize(isolate, array_buffer->byte_length()));
+
+ void* backing_store = array_buffer->backing_store();
+ int32_t* p =
+ reinterpret_cast<int32_t*>(static_cast<int8_t*>(backing_store) + addr);
+
+ base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
+
+ if (*p != value) {
+ return Smi::FromInt(Result::kNotEqual);
+ }
+
+ FutexWaitListNode* node = isolate->futex_wait_list_node();
+
+ node->backing_store_ = backing_store;
+ node->wait_addr_ = addr;
+ node->waiting_ = true;
+
+ bool use_timeout = rel_timeout_ms != V8_INFINITY;
+
+ base::TimeDelta rel_timeout;
+ if (use_timeout) {
+ // Convert to nanoseconds.
+ double rel_timeout_ns = rel_timeout_ms *
+ base::Time::kNanosecondsPerMicrosecond *
+ base::Time::kMicrosecondsPerMillisecond;
+ if (rel_timeout_ns >
+ static_cast<double>(std::numeric_limits<int64_t>::max())) {
+ // 2**63 nanoseconds is 292 years. Let's just treat anything greater as
+ // infinite.
+ use_timeout = false;
+ } else {
+ rel_timeout = base::TimeDelta::FromNanoseconds(
+ static_cast<int64_t>(rel_timeout_ns));
+ }
+ }
+
+ base::TimeTicks start_time = base::TimeTicks::Now();
+ base::TimeTicks timeout_time = start_time + rel_timeout;
+
+ wait_list_.Pointer()->AddNode(node);
+
+ Object* result;
+
+ while (true) {
+ base::TimeTicks current_time = base::TimeTicks::Now();
+ if (use_timeout && current_time > timeout_time) {
+ result = Smi::FromInt(Result::kTimedOut);
+ break;
+ }
+
+ base::TimeDelta time_until_timeout = timeout_time - current_time;
+ base::TimeDelta time_to_wait =
+ (use_timeout && time_until_timeout < kMaxWaitTime) ? time_until_timeout
+ : kMaxWaitTime;
+
+ bool wait_for_result = node->cond_.WaitFor(mutex_.Pointer(), time_to_wait);
+ USE(wait_for_result);
+
+ if (!node->waiting_) {
+ result = Smi::FromInt(Result::kOk);
+ break;
+ }
+
+ // Spurious wakeup or timeout. Potentially handle interrupts before
+ // continuing to wait.
+ Object* interrupt_object = isolate->stack_guard()->HandleInterrupts();
+ if (interrupt_object->IsException()) {
+ result = interrupt_object;
+ break;
+ }
+ }
+
+ wait_list_.Pointer()->RemoveNode(node);
+
+ return result;
+}
+
+
+Object* FutexEmulation::Wake(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer, size_t addr,
+ int num_waiters_to_wake) {
+ DCHECK(addr < NumberToSize(isolate, array_buffer->byte_length()));
+
+ int waiters_woken = 0;
+ void* backing_store = array_buffer->backing_store();
+
+ base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
+ FutexWaitListNode* node = wait_list_.Pointer()->head_;
+ while (node && num_waiters_to_wake > 0) {
+ if (backing_store == node->backing_store_ && addr == node->wait_addr_) {
+ node->waiting_ = false;
+ node->cond_.NotifyOne();
+ --num_waiters_to_wake;
+ waiters_woken++;
+ }
+
+ node = node->next_;
+ }
+
+ return Smi::FromInt(waiters_woken);
+}
+
+
+Object* FutexEmulation::WakeOrRequeue(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int num_waiters_to_wake,
+ int32_t value, size_t addr2) {
+ DCHECK(addr < NumberToSize(isolate, array_buffer->byte_length()));
+ DCHECK(addr2 < NumberToSize(isolate, array_buffer->byte_length()));
+
+ void* backing_store = array_buffer->backing_store();
+ int32_t* p =
+ reinterpret_cast<int32_t*>(static_cast<int8_t*>(backing_store) + addr);
+
+ base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
+ if (*p != value) {
+ return Smi::FromInt(Result::kNotEqual);
+ }
+
+ // Wake |num_waiters_to_wake|
+ int waiters_woken = 0;
+ FutexWaitListNode* node = wait_list_.Pointer()->head_;
+ while (node) {
+ if (backing_store == node->backing_store_ && addr == node->wait_addr_) {
+ if (num_waiters_to_wake > 0) {
+ node->waiting_ = false;
+ node->cond_.NotifyOne();
+ --num_waiters_to_wake;
+ waiters_woken++;
+ } else {
+ node->wait_addr_ = addr2;
+ }
+ }
+
+ node = node->next_;
+ }
+
+ return Smi::FromInt(waiters_woken);
+}
+
+
+Object* FutexEmulation::NumWaitersForTesting(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ size_t addr) {
+ DCHECK(addr < NumberToSize(isolate, array_buffer->byte_length()));
+ void* backing_store = array_buffer->backing_store();
+
+ base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
+
+ int waiters = 0;
+ FutexWaitListNode* node = wait_list_.Pointer()->head_;
+ while (node) {
+ if (backing_store == node->backing_store_ && addr == node->wait_addr_) {
+ waiters++;
+ }
+
+ node = node->next_;
+ }
+
+ return Smi::FromInt(waiters);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/futex-emulation.h b/deps/v8/src/futex-emulation.h
new file mode 100644
index 0000000000..86b5f78811
--- /dev/null
+++ b/deps/v8/src/futex-emulation.h
@@ -0,0 +1,124 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FUTEX_EMULATION_H_
+#define V8_FUTEX_EMULATION_H_
+
+#include <stdint.h>
+
+#include "src/allocation.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/macros.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
+#include "src/handles.h"
+
+// Support for emulating futexes, a low-level synchronization primitive. They
+// are natively supported by Linux, but must be emulated for other platforms.
+// This library emulates them on all platforms using mutexes and condition
+// variables for consistency.
+//
+// This is used by the Futex API defined in the SharedArrayBuffer draft spec,
+// found here: https://github.com/lars-t-hansen/ecmascript_sharedmem
+
+namespace v8 {
+
+namespace base {
+class TimeDelta;
+} // base
+
+namespace internal {
+
+class Isolate;
+class JSArrayBuffer;
+
+class FutexWaitListNode {
+ public:
+ FutexWaitListNode()
+ : prev_(nullptr),
+ next_(nullptr),
+ backing_store_(nullptr),
+ wait_addr_(0),
+ waiting_(false) {}
+
+ private:
+ friend class FutexEmulation;
+ friend class FutexWaitList;
+
+ base::ConditionVariable cond_;
+ FutexWaitListNode* prev_;
+ FutexWaitListNode* next_;
+ void* backing_store_;
+ size_t wait_addr_;
+ bool waiting_;
+
+ DISALLOW_COPY_AND_ASSIGN(FutexWaitListNode);
+};
+
+
+class FutexWaitList {
+ public:
+ FutexWaitList();
+
+ void AddNode(FutexWaitListNode* node);
+ void RemoveNode(FutexWaitListNode* node);
+
+ private:
+ friend class FutexEmulation;
+
+ FutexWaitListNode* head_;
+ FutexWaitListNode* tail_;
+
+ DISALLOW_COPY_AND_ASSIGN(FutexWaitList);
+};
+
+
+class FutexEmulation : public AllStatic {
+ public:
+ // These must match the values in src/harmony-atomics.js
+ enum Result {
+ kOk = 0,
+ kNotEqual = -1,
+ kTimedOut = -2,
+ };
+
+ // Check that array_buffer[addr] == value, and return kNotEqual if not. If
+ // they are equal, block execution on |isolate|'s thread until woken via
+ // |Wake|, or when the time given in |rel_timeout_ms| elapses. Note that
+ // |rel_timeout_ms| can be Infinity.
+ // If woken, return kOk, otherwise return kTimedOut. The initial check and
+ // the decision to wait happen atomically.
+ static Object* Wait(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int32_t value, double rel_timeout_ms);
+
+ // Wake |num_waiters_to_wake| threads that are waiting on the given |addr|.
+ // The rest of the waiters will continue to wait. The return value is the
+ // number of woken waiters.
+ static Object* Wake(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int num_waiters_to_wake);
+
+ // Check that array_buffer[addr] == value, and return kNotEqual if not. If
+ // they are equal, wake |num_waiters_to_wake| threads that are waiting on the
+ // given |addr|. The rest of the waiters will continue to wait, but will now
+ // be waiting on |addr2| instead of |addr|. The return value is the number of
+ // woken waiters or kNotEqual as described above.
+ static Object* WakeOrRequeue(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer, size_t addr,
+ int num_waiters_to_wake, int32_t value,
+ size_t addr2);
+
+ // Return the number of threads waiting on |addr|. Should only be used for
+ // testing.
+ static Object* NumWaitersForTesting(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ size_t addr);
+
+ private:
+ static base::LazyMutex mutex_;
+ static base::LazyInstance<FutexWaitList>::type wait_list_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_FUTEX_EMULATION_H_
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 2de4e66b49..d0fd8223e1 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/gdb-jit.h"
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
@@ -10,12 +10,12 @@
#include "src/compiler.h"
#include "src/frames-inl.h"
#include "src/frames.h"
-#include "src/gdb-jit.h"
#include "src/global-handles.h"
#include "src/messages.h"
#include "src/objects.h"
#include "src/ostreams.h"
#include "src/snapshot/natives.h"
+#include "src/splay-tree-inl.h"
namespace v8 {
namespace internal {
@@ -355,17 +355,13 @@ class ELFSection : public DebugSectionBase<ELFSectionHeader> {
#if defined(__MACH_O)
class MachOTextSection : public MachOSection {
public:
- MachOTextSection(uintptr_t align,
- uintptr_t addr,
- uintptr_t size)
- : MachOSection("__text",
- "__TEXT",
- align,
+ MachOTextSection(uint32_t align, uintptr_t addr, uintptr_t size)
+ : MachOSection("__text", "__TEXT", align,
MachOSection::S_REGULAR |
MachOSection::S_ATTR_SOME_INSTRUCTIONS |
MachOSection::S_ATTR_PURE_INSTRUCTIONS),
addr_(addr),
- size_(size) { }
+ size_(size) {}
protected:
virtual void PopulateHeader(Writer::Slot<Header> header) {
@@ -588,7 +584,8 @@ class MachO BASE_EMBEDDED {
Writer::Slot<MachOSection::Header> headers =
w->CreateSlotsHere<MachOSection::Header>(sections_.length());
cmd->fileoff = w->position();
- header->sizeofcmds = w->position() - load_command_start;
+ header->sizeofcmds =
+ static_cast<uint32_t>(w->position() - load_command_start);
for (int section = 0; section < sections_.length(); ++section) {
sections_[section]->PopulateHeader(headers.at(section));
sections_[section]->WriteBody(headers.at(section), w);
@@ -652,9 +649,13 @@ class ELF BASE_EMBEDDED {
(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT
+#elif(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT) || \
+ (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+#elif V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && V8_OS_LINUX
+ const uint8_t ident[16] = {0x7f, 'E', 'L', 'F', 2, 2, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0};
#else
#error Unsupported target architecture.
#endif
@@ -671,6 +672,14 @@ class ELF BASE_EMBEDDED {
// Set to EM_ARM, defined as 40, in "ARM ELF File Format" at
// infocenter.arm.com/help/topic/com.arm.doc.dui0101a/DUI0101A_Elf.pdf
header->machine = 40;
+#elif V8_TARGET_ARCH_PPC64 && V8_OS_LINUX
+ // Set to EM_PPC64, defined as 21, in Power ABI,
+ // Join the next 4 lines, omitting the spaces and double-slashes.
+ // https://www-03.ibm.com/technologyconnect/tgcm/TGCMFileServlet.wss/
+ // ABI64BitOpenPOWERv1.1_16July2015_pub.pdf?
+ // id=B81AEC1A37F5DAF185257C3E004E8845&linkid=1n0000&c_t=
+ // c9xw7v5dzsj7gt1ifgf4cjbcnskqptmr
+ header->machine = 21;
#else
#error Unsupported target architecture.
#endif
@@ -786,7 +795,8 @@ class ELFSymbol BASE_EMBEDDED {
uint8_t other;
uint16_t section;
};
-#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT
+#elif(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT) || \
+ (V8_TARGET_ARCH_PPC64 && V8_OS_LINUX)
struct SerializedLayout {
SerializedLayout(uint32_t name,
uintptr_t value,
@@ -993,7 +1003,7 @@ class CodeDescription BASE_EMBEDDED {
}
#endif
- SmartArrayPointer<char> GetFilename() {
+ base::SmartArrayPointer<char> GetFilename() {
return String::cast(script()->name())->ToCString();
}
@@ -1064,6 +1074,30 @@ class DebugInfoSection : public DebugSection {
DW_OP_reg5 = 0x55,
DW_OP_reg6 = 0x56,
DW_OP_reg7 = 0x57,
+ DW_OP_reg8 = 0x58,
+ DW_OP_reg9 = 0x59,
+ DW_OP_reg10 = 0x5a,
+ DW_OP_reg11 = 0x5b,
+ DW_OP_reg12 = 0x5c,
+ DW_OP_reg13 = 0x5d,
+ DW_OP_reg14 = 0x5e,
+ DW_OP_reg15 = 0x5f,
+ DW_OP_reg16 = 0x60,
+ DW_OP_reg17 = 0x61,
+ DW_OP_reg18 = 0x62,
+ DW_OP_reg19 = 0x63,
+ DW_OP_reg20 = 0x64,
+ DW_OP_reg21 = 0x65,
+ DW_OP_reg22 = 0x66,
+ DW_OP_reg23 = 0x67,
+ DW_OP_reg24 = 0x68,
+ DW_OP_reg25 = 0x69,
+ DW_OP_reg26 = 0x6a,
+ DW_OP_reg27 = 0x6b,
+ DW_OP_reg28 = 0x6c,
+ DW_OP_reg29 = 0x6d,
+ DW_OP_reg30 = 0x6e,
+ DW_OP_reg31 = 0x6f,
DW_OP_fbreg = 0x91 // 1 param: SLEB128 offset
};
@@ -1109,6 +1143,8 @@ class DebugInfoSection : public DebugSection {
UNIMPLEMENTED();
#elif V8_TARGET_ARCH_MIPS64
UNIMPLEMENTED();
+#elif V8_TARGET_ARCH_PPC64 && V8_OS_LINUX
+ w->Write<uint8_t>(DW_OP_reg31); // The frame pointer is here on PPC64.
#else
#error Unsupported target architecture.
#endif
diff --git a/deps/v8/src/gdb-jit.h b/deps/v8/src/gdb-jit.h
index 45382702da..b915e71ebe 100644
--- a/deps/v8/src/gdb-jit.h
+++ b/deps/v8/src/gdb-jit.h
@@ -5,7 +5,7 @@
#ifndef V8_GDB_JIT_H_
#define V8_GDB_JIT_H_
-#include "src/v8.h"
+#include "include/v8.h"
//
// GDB has two ways of interacting with JIT code. With the "JIT compilation
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index aa6542baee..befa173767 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -495,6 +495,31 @@ class GlobalHandles::NodeIterator {
DISALLOW_COPY_AND_ASSIGN(NodeIterator);
};
+class GlobalHandles::PendingPhantomCallbacksSecondPassTask
+ : public v8::internal::CancelableTask {
+ public:
+ // Takes ownership of the contents of pending_phantom_callbacks, leaving it in
+ // the same state it would be after a call to Clear().
+ PendingPhantomCallbacksSecondPassTask(
+ List<PendingPhantomCallback>* pending_phantom_callbacks, Isolate* isolate)
+ : CancelableTask(isolate) {
+ pending_phantom_callbacks_.Swap(pending_phantom_callbacks);
+ }
+
+ void RunInternal() override {
+ isolate_->heap()->CallGCPrologueCallbacks(
+ GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
+ InvokeSecondPassPhantomCallbacks(&pending_phantom_callbacks_, isolate_);
+ isolate_->heap()->CallGCEpilogueCallbacks(
+ GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
+ }
+
+ private:
+ List<PendingPhantomCallback> pending_phantom_callbacks_;
+
+ DISALLOW_COPY_AND_ASSIGN(PendingPhantomCallbacksSecondPassTask);
+};
+
GlobalHandles::GlobalHandles(Isolate* isolate)
: isolate_(isolate),
@@ -709,6 +734,19 @@ bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
}
+void GlobalHandles::InvokeSecondPassPhantomCallbacks(
+ List<PendingPhantomCallback>* callbacks, Isolate* isolate) {
+ while (callbacks->length() != 0) {
+ auto callback = callbacks->RemoveLast();
+ DCHECK(callback.node() == nullptr);
+ // No second pass callback required.
+ if (callback.callback() == nullptr) continue;
+ // Fire second pass callback
+ callback.Invoke(isolate);
+ }
+}
+
+
int GlobalHandles::PostScavengeProcessing(
const int initial_post_gc_processing_count) {
int freed_nodes = 0;
@@ -791,7 +829,8 @@ void GlobalHandles::UpdateListOfNewSpaceNodes() {
}
-int GlobalHandles::DispatchPendingPhantomCallbacks() {
+int GlobalHandles::DispatchPendingPhantomCallbacks(
+ bool synchronous_second_pass) {
int freed_nodes = 0;
{
// The initial pass callbacks must simply clear the nodes.
@@ -804,14 +843,19 @@ int GlobalHandles::DispatchPendingPhantomCallbacks() {
freed_nodes++;
}
}
- // The second pass empties the list.
- while (pending_phantom_callbacks_.length() != 0) {
- auto callback = pending_phantom_callbacks_.RemoveLast();
- DCHECK(callback.node() == nullptr);
- // No second pass callback required.
- if (callback.callback() == nullptr) continue;
- // Fire second pass callback.
- callback.Invoke(isolate());
+ if (pending_phantom_callbacks_.length() > 0) {
+ if (FLAG_optimize_for_size || FLAG_predictable || synchronous_second_pass) {
+ isolate()->heap()->CallGCPrologueCallbacks(
+ GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
+ InvokeSecondPassPhantomCallbacks(&pending_phantom_callbacks_, isolate());
+ isolate()->heap()->CallGCEpilogueCallbacks(
+ GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
+ } else {
+ auto task = new PendingPhantomCallbacksSecondPassTask(
+ &pending_phantom_callbacks_, isolate());
+ V8::GetCurrentPlatform()->CallOnForegroundThread(
+ reinterpret_cast<v8::Isolate*>(isolate()), task);
+ }
}
pending_phantom_callbacks_.Clear();
return freed_nodes;
@@ -838,14 +882,19 @@ void GlobalHandles::PendingPhantomCallback::Invoke(Isolate* isolate) {
}
-int GlobalHandles::PostGarbageCollectionProcessing(GarbageCollector collector) {
+int GlobalHandles::PostGarbageCollectionProcessing(
+ GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
DCHECK(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
const int initial_post_gc_processing_count = ++post_gc_processing_count_;
int freed_nodes = 0;
- freed_nodes += DispatchPendingPhantomCallbacks();
+ bool synchronous_second_pass =
+ (gc_callback_flags &
+ (kGCCallbackFlagForced |
+ kGCCallbackFlagSynchronousPhantomCallbackProcessing)) != 0;
+ freed_nodes += DispatchPendingPhantomCallbacks(synchronous_second_pass);
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// If the callbacks caused a nested GC, then return. See comment in
// PostScavengeProcessing.
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 6724847303..0ee8c20a37 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -181,7 +181,8 @@ class GlobalHandles {
// Process pending weak handles.
// Returns the number of freed nodes.
- int PostGarbageCollectionProcessing(GarbageCollector collector);
+ int PostGarbageCollectionProcessing(
+ GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags);
// Iterates over all strong handles.
void IterateStrongRoots(ObjectVisitor* v);
@@ -287,17 +288,21 @@ class GlobalHandles {
// don't assign any initial capacity.
static const int kObjectGroupConnectionsCapacity = 20;
+ class PendingPhantomCallback;
+
// Helpers for PostGarbageCollectionProcessing.
+ static void InvokeSecondPassPhantomCallbacks(
+ List<PendingPhantomCallback>* callbacks, Isolate* isolate);
int PostScavengeProcessing(int initial_post_gc_processing_count);
int PostMarkSweepProcessing(int initial_post_gc_processing_count);
- int DispatchPendingPhantomCallbacks();
+ int DispatchPendingPhantomCallbacks(bool synchronous_second_pass);
void UpdateListOfNewSpaceNodes();
// Internal node structures.
class Node;
class NodeBlock;
class NodeIterator;
- class PendingPhantomCallback;
+ class PendingPhantomCallbacksSecondPassTask;
Isolate* isolate_;
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index f85e92985a..5f1070382c 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -30,29 +30,6 @@
# define V8_INFINITY INFINITY
#endif
-#if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
- V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_X87
-
-#define V8_TURBOFAN_BACKEND 1
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_PPC64
-// 64-bit TurboFan backends support 64-bit integer arithmetic.
-#define V8_TURBOFAN_BACKEND_64 1
-#else
-#define V8_TURBOFAN_BACKEND_64 0
-#endif
-
-#else
-#define V8_TURBOFAN_BACKEND 0
-#endif
-
-#if V8_TURBOFAN_BACKEND
-#define V8_TURBOFAN_TARGET 1
-#else
-#define V8_TURBOFAN_TARGET 0
-#endif
-
namespace v8 {
namespace base {
@@ -423,7 +400,6 @@ class CodeGenerator;
class CodeStub;
class Context;
class Debug;
-class Debugger;
class DebugInfo;
class Descriptor;
class DescriptorArray;
@@ -466,7 +442,6 @@ class String;
class Symbol;
class Name;
class Struct;
-class Symbol;
class Variable;
class RelocInfo;
class Deserializer;
@@ -540,6 +515,8 @@ enum VisitMode {
// Flag indicating whether code is built into the VM (one of the natives files).
enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
+// JavaScript defines two kinds of 'nil'.
+enum NilValue { kNullValue, kUndefinedValue };
// ParseRestriction is used to restrict the set of valid statements in a
// unit of compilation. Restriction violations cause a syntax error.
@@ -574,11 +551,6 @@ struct CodeDesc {
};
-// Callback function used for iterating objects in heap spaces,
-// for example, scanning heap objects.
-typedef int (*HeapObjectCallback)(HeapObject* obj);
-
-
// Callback function used for checking constraints when copying/relocating
// objects. Returns true if an object can be copied/relocated from its
// old_addr to a new_addr.
@@ -811,9 +783,6 @@ enum VariableMode {
IMPORT, // declared via 'import' declarations (last lexical)
// Variables introduced by the compiler:
- INTERNAL, // like VAR, but not user-visible (may or may not
- // be in a context)
-
TEMPORARY, // temporary variables (not user-visible), stack-allocated
// unless the scope as a whole has forced context allocation
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 3022f288a3..b905c16a04 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -1,7 +1,6 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
#ifndef V8_HANDLES_INL_H_
#define V8_HANDLES_INL_H_
@@ -14,72 +13,8 @@
namespace v8 {
namespace internal {
-template<typename T>
-Handle<T>::Handle(T* obj) {
- location_ = HandleScope::CreateHandle(obj->GetIsolate(), obj);
-}
-
-
-template<typename T>
-Handle<T>::Handle(T* obj, Isolate* isolate) {
- location_ = HandleScope::CreateHandle(isolate, obj);
-}
-
-
-template <typename T>
-inline bool Handle<T>::is_identical_to(const Handle<T> o) const {
- // Dereferencing deferred handles to check object equality is safe.
- SLOW_DCHECK(
- (location_ == NULL || IsDereferenceAllowed(NO_DEFERRED_CHECK)) &&
- (o.location_ == NULL || o.IsDereferenceAllowed(NO_DEFERRED_CHECK)));
- if (location_ == o.location_) return true;
- if (location_ == NULL || o.location_ == NULL) return false;
- return *location_ == *o.location_;
-}
-
-
-template <typename T>
-inline T* Handle<T>::operator*() const {
- SLOW_DCHECK(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
- return *bit_cast<T**>(location_);
-}
-
-template <typename T>
-inline T** Handle<T>::location() const {
- SLOW_DCHECK(location_ == NULL ||
- IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
- return location_;
-}
-
-#ifdef DEBUG
-template <typename T>
-bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
- DCHECK(location_ != NULL);
- Object* object = *bit_cast<T**>(location_);
- if (object->IsSmi()) return true;
- HeapObject* heap_object = HeapObject::cast(object);
- Heap* heap = heap_object->GetHeap();
- Object** handle = reinterpret_cast<Object**>(location_);
- Object** roots_array_start = heap->roots_array_start();
- if (roots_array_start <= handle &&
- handle < roots_array_start + Heap::kStrongRootListLength &&
- heap->RootCanBeTreatedAsConstant(
- static_cast<Heap::RootListIndex>(handle - roots_array_start))) {
- return true;
- }
- if (!AllowHandleDereference::IsAllowed()) return false;
- if (mode == INCLUDE_DEFERRED_CHECK &&
- !AllowDeferredHandleDereference::IsAllowed()) {
- // Accessing cells, maps and internalized strings is safe.
- if (heap_object->IsCell()) return true;
- if (heap_object->IsMap()) return true;
- if (heap_object->IsInternalizedString()) return true;
- return !heap->isolate()->IsDeferredHandle(handle);
- }
- return true;
-}
-#endif
-
+HandleBase::HandleBase(Object* object, Isolate* isolate)
+ : location_(HandleScope::CreateHandle(isolate, object)) {}
HandleScope::HandleScope(Isolate* isolate) {
@@ -136,7 +71,7 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
CloseScope(isolate_, prev_next_, prev_limit_);
// Allocate one handle in the parent scope.
DCHECK(current->level > 0);
- Handle<T> result(CreateHandle<T>(isolate_, value));
+ Handle<T> result(value, isolate_);
// Reinitialize the current scope (so that it's ready
// to be used or closed again).
prev_next_ = current->next;
@@ -151,7 +86,7 @@ T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
DCHECK(AllowHandleAllocation::IsAllowed());
HandleScopeData* current = isolate->handle_scope_data();
- internal::Object** cur = current->next;
+ Object** cur = current->next;
if (cur == current->limit) cur = Extend(isolate);
// Update the current next field, set the value in the created
// handle, and return the result.
@@ -190,6 +125,7 @@ inline SealHandleScope::~SealHandleScope() {
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HANDLES_INL_H_
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index d415315986..ca23a6f75f 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -9,6 +9,34 @@
namespace v8 {
namespace internal {
+#ifdef DEBUG
+bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
+ DCHECK_NOT_NULL(location_);
+ Object* object = *location_;
+ if (object->IsSmi()) return true;
+ HeapObject* heap_object = HeapObject::cast(object);
+ Heap* heap = heap_object->GetHeap();
+ Object** roots_array_start = heap->roots_array_start();
+ if (roots_array_start <= location_ &&
+ location_ < roots_array_start + Heap::kStrongRootListLength &&
+ heap->RootCanBeTreatedAsConstant(
+ static_cast<Heap::RootListIndex>(location_ - roots_array_start))) {
+ return true;
+ }
+ if (!AllowHandleDereference::IsAllowed()) return false;
+ if (mode == INCLUDE_DEFERRED_CHECK &&
+ !AllowDeferredHandleDereference::IsAllowed()) {
+ // Accessing cells, maps and internalized strings is safe.
+ if (heap_object->IsCell()) return true;
+ if (heap_object->IsMap()) return true;
+ if (heap_object->IsInternalizedString()) return true;
+ return !heap->isolate()->IsDeferredHandle(location_);
+ }
+ return true;
+}
+#endif
+
+
int HandleScope::NumberOfHandles(Isolate* isolate) {
HandleScopeImplementer* impl = isolate->handle_scope_implementer();
int n = impl->blocks()->length();
@@ -67,7 +95,7 @@ void HandleScope::DeleteExtensions(Isolate* isolate) {
void HandleScope::ZapRange(Object** start, Object** end) {
DCHECK(end - start <= kHandleBlockSize);
for (Object** p = start; p != end; p++) {
- *reinterpret_cast<Address*>(p) = v8::internal::kHandleZapValue;
+ *reinterpret_cast<Address*>(p) = kHandleZapValue;
}
}
#endif
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 162b6d282f..2db18307da 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -5,177 +5,221 @@
#ifndef V8_HANDLES_H_
#define V8_HANDLES_H_
-#include "src/objects.h"
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/checks.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
-// A Handle can be converted into a MaybeHandle. Converting a MaybeHandle
-// into a Handle requires checking that it does not point to NULL. This
-// ensures NULL checks before use.
-// Do not use MaybeHandle as argument type.
+// Forward declarations.
+class DeferredHandles;
+class HandleScopeImplementer;
+class Isolate;
+class Object;
-template<typename T>
-class MaybeHandle {
- public:
- INLINE(MaybeHandle()) : location_(NULL) { }
- // Constructor for handling automatic up casting from Handle.
- // Ex. Handle<JSArray> can be passed when MaybeHandle<Object> is expected.
- template <class S> MaybeHandle(Handle<S> handle) {
-#ifdef DEBUG
- T* a = NULL;
- S* b = NULL;
- a = b; // Fake assignment to enforce type checks.
- USE(a);
-#endif
- this->location_ = reinterpret_cast<T**>(handle.location());
- }
+// ----------------------------------------------------------------------------
+// Base class for Handle instantiations. Don't use directly.
+class HandleBase {
+ public:
+ V8_INLINE explicit HandleBase(Object** location) : location_(location) {}
+ V8_INLINE explicit HandleBase(Object* object, Isolate* isolate);
- // Constructor for handling automatic up casting.
- // Ex. MaybeHandle<JSArray> can be passed when Handle<Object> is expected.
- template <class S> MaybeHandle(MaybeHandle<S> maybe_handle) {
-#ifdef DEBUG
- T* a = NULL;
- S* b = NULL;
- a = b; // Fake assignment to enforce type checks.
- USE(a);
-#endif
- location_ = reinterpret_cast<T**>(maybe_handle.location_);
+ // Check if this handle refers to the exact same object as the other handle.
+ V8_INLINE bool is_identical_to(const HandleBase that) const {
+ // Dereferencing deferred handles to check object equality is safe.
+ SLOW_DCHECK((this->location_ == nullptr ||
+ this->IsDereferenceAllowed(NO_DEFERRED_CHECK)) &&
+ (that.location_ == nullptr ||
+ that.IsDereferenceAllowed(NO_DEFERRED_CHECK)));
+ if (this->location_ == that.location_) return true;
+ if (this->location_ == NULL || that.location_ == NULL) return false;
+ return *this->location_ == *that.location_;
}
- INLINE(void Assert() const) { DCHECK(location_ != NULL); }
- INLINE(void Check() const) { CHECK(location_ != NULL); }
-
- INLINE(Handle<T> ToHandleChecked()) const {
- Check();
- return Handle<T>(location_);
- }
+ V8_INLINE bool is_null() const { return location_ == nullptr; }
- // Convert to a Handle with a type that can be upcasted to.
- template <class S>
- V8_INLINE bool ToHandle(Handle<S>* out) const {
- if (location_ == NULL) {
- *out = Handle<T>::null();
- return false;
- } else {
- *out = Handle<T>(location_);
- return true;
- }
+ protected:
+ // Provides the C++ dereference operator.
+ V8_INLINE Object* operator*() const {
+ SLOW_DCHECK(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
+ return *location_;
}
- bool is_null() const { return location_ == NULL; }
-
- template <typename S>
- bool operator==(MaybeHandle<S> that) const {
- return this->location_ == that.location_;
- }
- template <typename S>
- bool operator!=(MaybeHandle<S> that) const {
- return !(*this == that);
+ // Returns the address to where the raw pointer is stored.
+ V8_INLINE Object** location() const {
+ SLOW_DCHECK(location_ == nullptr ||
+ IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
+ return location_;
}
+ enum DereferenceCheckMode { INCLUDE_DEFERRED_CHECK, NO_DEFERRED_CHECK };
+#ifdef DEBUG
+ bool IsDereferenceAllowed(DereferenceCheckMode mode) const;
+#else
+ V8_INLINE
+ bool IsDereferenceAllowed(DereferenceCheckMode mode) const { return true; }
+#endif // DEBUG
- protected:
- T** location_;
-
- // MaybeHandles of different classes are allowed to access each
- // other's location_.
- template<class S> friend class MaybeHandle;
- template <typename S>
- friend size_t hash_value(MaybeHandle<S>);
+ Object** location_;
};
-template <typename S>
-inline size_t hash_value(MaybeHandle<S> maybe_handle) {
- return bit_cast<size_t>(maybe_handle.location_);
-}
-
// ----------------------------------------------------------------------------
// A Handle provides a reference to an object that survives relocation by
// the garbage collector.
// Handles are only valid within a HandleScope.
// When a handle is created for an object a cell is allocated in the heap.
-
-template<typename T>
-class Handle {
+template <typename T>
+class Handle final : public HandleBase {
public:
- INLINE(explicit Handle(T** location)) { location_ = location; }
- INLINE(explicit Handle(T* obj));
- INLINE(Handle(T* obj, Isolate* isolate));
-
- // TODO(yangguo): Values that contain empty handles should be declared as
- // MaybeHandle to force validation before being used as handles.
- INLINE(Handle()) : location_(NULL) { }
+ V8_INLINE explicit Handle(T** location = nullptr)
+ : HandleBase(reinterpret_cast<Object**>(location)) {
+ Object* a = nullptr;
+ T* b = nullptr;
+ a = b; // Fake assignment to enforce type checks.
+ USE(a);
+ }
+ V8_INLINE explicit Handle(T* object) : Handle(object, object->GetIsolate()) {}
+ V8_INLINE Handle(T* object, Isolate* isolate) : HandleBase(object, isolate) {}
// Constructor for handling automatic up casting.
// Ex. Handle<JSFunction> can be passed when Handle<Object> is expected.
- template <class S> Handle(Handle<S> handle) {
-#ifdef DEBUG
- T* a = NULL;
- S* b = NULL;
+ template <typename S>
+ V8_INLINE Handle(Handle<S> handle)
+ : HandleBase(handle) {
+ T* a = nullptr;
+ S* b = nullptr;
a = b; // Fake assignment to enforce type checks.
USE(a);
-#endif
- location_ = reinterpret_cast<T**>(handle.location_);
}
- INLINE(T* operator->() const) { return operator*(); }
-
- // Check if this handle refers to the exact same object as the other handle.
- INLINE(bool is_identical_to(const Handle<T> other) const);
+ V8_INLINE T* operator->() const { return operator*(); }
// Provides the C++ dereference operator.
- INLINE(T* operator*() const);
+ V8_INLINE T* operator*() const {
+ return reinterpret_cast<T*>(HandleBase::operator*());
+ }
// Returns the address to where the raw pointer is stored.
- INLINE(T** location() const);
+ V8_INLINE T** location() const {
+ return reinterpret_cast<T**>(HandleBase::location());
+ }
- template <class S> static Handle<T> cast(Handle<S> that) {
+ template <typename S>
+ static const Handle<T> cast(Handle<S> that) {
T::cast(*reinterpret_cast<T**>(that.location_));
return Handle<T>(reinterpret_cast<T**>(that.location_));
}
// TODO(yangguo): Values that contain empty handles should be declared as
// MaybeHandle to force validation before being used as handles.
- static Handle<T> null() { return Handle<T>(); }
- bool is_null() const { return location_ == NULL; }
-
- // Closes the given scope, but lets this handle escape. See
- // implementation in api.h.
- inline Handle<T> EscapeFrom(v8::EscapableHandleScope* scope);
-
-#ifdef DEBUG
- enum DereferenceCheckMode { INCLUDE_DEFERRED_CHECK, NO_DEFERRED_CHECK };
-
- bool IsDereferenceAllowed(DereferenceCheckMode mode) const;
-#endif // DEBUG
+ static const Handle<T> null() { return Handle<T>(); }
private:
- T** location_;
-
// Handles of different classes are allowed to access each other's location_.
- template<class S> friend class Handle;
+ template <typename>
+ friend class Handle;
+ // MaybeHandle is allowed to access location_.
+ template <typename>
+ friend class MaybeHandle;
};
+template <typename T>
+V8_INLINE Handle<T> handle(T* object, Isolate* isolate) {
+ return Handle<T>(object, isolate);
+}
-// Convenience wrapper.
-template<class T>
-inline Handle<T> handle(T* t, Isolate* isolate) {
- return Handle<T>(t, isolate);
+template <typename T>
+V8_INLINE Handle<T> handle(T* object) {
+ return Handle<T>(object);
}
-// Convenience wrapper.
-template<class T>
-inline Handle<T> handle(T* t) {
- return Handle<T>(t, t->GetIsolate());
-}
+// ----------------------------------------------------------------------------
+// A Handle can be converted into a MaybeHandle. Converting a MaybeHandle
+// into a Handle requires checking that it does not point to NULL. This
+// ensures NULL checks before use.
+// Do not use MaybeHandle as argument type.
+template <typename T>
+class MaybeHandle final {
+ public:
+ V8_INLINE MaybeHandle() {}
+ V8_INLINE ~MaybeHandle() {}
+ // Constructor for handling automatic up casting from Handle.
+ // Ex. Handle<JSArray> can be passed when MaybeHandle<Object> is expected.
+ template <typename S>
+ V8_INLINE MaybeHandle(Handle<S> handle)
+ : location_(reinterpret_cast<T**>(handle.location_)) {
+ T* a = nullptr;
+ S* b = nullptr;
+ a = b; // Fake assignment to enforce type checks.
+ USE(a);
+ }
-class DeferredHandles;
-class HandleScopeImplementer;
+ // Constructor for handling automatic up casting.
+ // Ex. MaybeHandle<JSArray> can be passed when Handle<Object> is expected.
+ template <typename S>
+ V8_INLINE MaybeHandle(MaybeHandle<S> maybe_handle)
+ : location_(reinterpret_cast<T**>(maybe_handle.location_)) {
+ T* a = nullptr;
+ S* b = nullptr;
+ a = b; // Fake assignment to enforce type checks.
+ USE(a);
+ }
+
+ V8_INLINE void Assert() const { DCHECK_NOT_NULL(location_); }
+ V8_INLINE void Check() const { CHECK_NOT_NULL(location_); }
+
+ V8_INLINE Handle<T> ToHandleChecked() const {
+ Check();
+ return Handle<T>(location_);
+ }
+
+ // Convert to a Handle with a type that can be upcasted to.
+ template <typename S>
+ V8_INLINE bool ToHandle(Handle<S>* out) const {
+ if (location_ == nullptr) {
+ *out = Handle<T>::null();
+ return false;
+ } else {
+ *out = Handle<T>(location_);
+ return true;
+ }
+ }
+
+ bool is_null() const { return location_ == nullptr; }
+
+ template <typename S>
+ V8_INLINE bool operator==(MaybeHandle<S> that) const {
+ return this->location_ == that.location_;
+ }
+ template <typename S>
+ V8_INLINE bool operator!=(MaybeHandle<S> that) const {
+ return this->location_ != that.location_;
+ }
+
+ protected:
+ T** location_ = nullptr;
+
+ // MaybeHandles of different classes are allowed to access each
+ // other's location_.
+ template <typename>
+ friend class MaybeHandle;
+ // Utility functions are allowed to access location_.
+ template <typename S>
+ friend size_t hash_value(MaybeHandle<S>);
+};
+
+template <typename T>
+V8_INLINE size_t hash_value(MaybeHandle<T> maybe_handle) {
+ uintptr_t v = bit_cast<uintptr_t>(maybe_handle.location_);
+ DCHECK_EQ(0u, v & ((1u << kPointerSizeLog2) - 1));
+ return v >> kPointerSizeLog2;
+}
// A stack-allocated class that governs a number of local handles.
@@ -241,7 +285,7 @@ class HandleScope {
Object** prev_limit);
// Extend the handle scope making room for more handles.
- static internal::Object** Extend(Isolate* isolate);
+ static Object** Extend(Isolate* isolate);
#ifdef ENABLE_HANDLE_ZAPPING
// Zaps the handles in the half-open interval [start, end).
@@ -249,16 +293,13 @@ class HandleScope {
#endif
friend class v8::HandleScope;
- friend class v8::internal::DeferredHandles;
- friend class v8::internal::HandleScopeImplementer;
- friend class v8::internal::Isolate;
+ friend class DeferredHandles;
+ friend class HandleScopeImplementer;
+ friend class Isolate;
};
-class DeferredHandles;
-
-
-class DeferredHandleScope {
+class DeferredHandleScope final {
public:
explicit DeferredHandleScope(Isolate* isolate);
// The DeferredHandles object returned stores the Handles created
@@ -283,7 +324,7 @@ class DeferredHandleScope {
// Seal off the current HandleScope so that new handles can only be created
// if a new HandleScope is entered.
-class SealHandleScope BASE_EMBEDDED {
+class SealHandleScope final {
public:
#ifndef DEBUG
explicit SealHandleScope(Isolate* isolate) {}
@@ -298,9 +339,10 @@ class SealHandleScope BASE_EMBEDDED {
#endif
};
-struct HandleScopeData {
- internal::Object** next;
- internal::Object** limit;
+
+struct HandleScopeData final {
+ Object** next;
+ Object** limit;
int level;
void Initialize() {
@@ -309,6 +351,7 @@ struct HandleScopeData {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HANDLES_H_
diff --git a/deps/v8/src/harmony-array-includes.js b/deps/v8/src/harmony-array-includes.js
index b133f1ec8c..124edf62ec 100644
--- a/deps/v8/src/harmony-array-includes.js
+++ b/deps/v8/src/harmony-array-includes.js
@@ -14,12 +14,9 @@ var GlobalArray = global.Array;
// Proposed for ES7
// https://github.com/tc39/Array.prototype.includes
-// 6e3b78c927aeda20b9d40e81303f9d44596cd904
-function ArrayIncludes(searchElement, fromIndex) {
- var array = $toObject(this);
- var len = $toLength(array.length);
-
- if (len === 0) {
+// 46c7532ec8499dea3e51aeb940d09e07547ed3f5
+function InnerArrayIncludes(searchElement, fromIndex, array, length) {
+ if (length === 0) {
return false;
}
@@ -29,13 +26,13 @@ function ArrayIncludes(searchElement, fromIndex) {
if (n >= 0) {
k = n;
} else {
- k = len + n;
+ k = length + n;
if (k < 0) {
k = 0;
}
}
- while (k < len) {
+ while (k < length) {
var elementK = array[k];
if ($sameValueZero(searchElement, elementK)) {
return true;
@@ -47,13 +44,65 @@ function ArrayIncludes(searchElement, fromIndex) {
return false;
}
+
+function ArrayIncludes(searchElement, fromIndex) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.includes");
+
+ var array = TO_OBJECT(this);
+ var length = $toLength(array.length);
+
+ return InnerArrayIncludes(searchElement, fromIndex, array, length);
+}
+
+
+function TypedArrayIncludes(searchElement, fromIndex) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayIncludes(searchElement, fromIndex, this, length);
+}
+
// -------------------------------------------------------------------
%FunctionSetLength(ArrayIncludes, 1);
+%FunctionSetLength(TypedArrayIncludes, 1);
-// Set up the non-enumerable functions on the Array prototype object.
+// Set up the non-enumerable function on the Array prototype object.
utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"includes", ArrayIncludes
]);
+// Set up the non-enumerable function on the typed array prototypes.
+// This duplicates some of the machinery in harmony-typedarray.js in order to
+// keep includes behind the separate --harmony-array-includes flag.
+// TODO(littledan): Fix the TypedArray proto chain (bug v8:4085).
+
+macro TYPED_ARRAYS(FUNCTION)
+// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
+FUNCTION(Uint8Array)
+FUNCTION(Int8Array)
+FUNCTION(Uint16Array)
+FUNCTION(Int16Array)
+FUNCTION(Uint32Array)
+FUNCTION(Int32Array)
+FUNCTION(Float32Array)
+FUNCTION(Float64Array)
+FUNCTION(Uint8ClampedArray)
+endmacro
+
+macro DECLARE_GLOBALS(NAME)
+var GlobalNAME = global.NAME;
+endmacro
+
+macro EXTEND_TYPED_ARRAY(NAME)
+// Set up non-enumerable functions on the prototype object.
+utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
+ "includes", TypedArrayIncludes
+]);
+endmacro
+
+TYPED_ARRAYS(DECLARE_GLOBALS)
+TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
+
})
diff --git a/deps/v8/src/harmony-array.js b/deps/v8/src/harmony-array.js
index e94134b81a..49176460ad 100644
--- a/deps/v8/src/harmony-array.js
+++ b/deps/v8/src/harmony-array.js
@@ -11,15 +11,15 @@
// -------------------------------------------------------------------
// Imports
-var GlobalArray = global.Array;
-var GlobalSymbol = global.Symbol;
-
var GetIterator;
var GetMethod;
+var GlobalArray = global.Array;
+var GlobalSymbol = global.Symbol;
var MathMax;
var MathMin;
var ObjectIsFrozen;
var ObjectDefineProperty;
+var ToNumber;
utils.Import(function(from) {
GetIterator = from.GetIterator;
@@ -28,6 +28,7 @@ utils.Import(function(from) {
MathMin = from.MathMin;
ObjectIsFrozen = from.ObjectIsFrozen;
ObjectDefineProperty = from.ObjectDefineProperty;
+ ToNumber = from.ToNumber;
});
// -------------------------------------------------------------------
@@ -83,7 +84,7 @@ function InnerArrayCopyWithin(target, start, end, array, length) {
function ArrayCopyWithin(target, start, end) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
- var array = TO_OBJECT_INLINE(this);
+ var array = TO_OBJECT(this);
var length = $toLength(array.length);
return InnerArrayCopyWithin(target, start, end, array, length);
@@ -103,7 +104,7 @@ function InnerArrayFind(predicate, thisArg, array, length) {
for (var i = 0; i < length; i++) {
var element = array[i];
- var newThisArg = needs_wrapper ? $toObject(thisArg) : thisArg;
+ var newThisArg = needs_wrapper ? TO_OBJECT(thisArg) : thisArg;
if (%_CallFunction(newThisArg, element, i, array, predicate)) {
return element;
}
@@ -116,7 +117,7 @@ function InnerArrayFind(predicate, thisArg, array, length) {
function ArrayFind(predicate, thisArg) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.find");
- var array = $toObject(this);
+ var array = TO_OBJECT(this);
var length = $toInteger(array.length);
return InnerArrayFind(predicate, thisArg, array, length);
@@ -136,7 +137,7 @@ function InnerArrayFindIndex(predicate, thisArg, array, length) {
for (var i = 0; i < length; i++) {
var element = array[i];
- var newThisArg = needs_wrapper ? $toObject(thisArg) : thisArg;
+ var newThisArg = needs_wrapper ? TO_OBJECT(thisArg) : thisArg;
if (%_CallFunction(newThisArg, element, i, array, predicate)) {
return i;
}
@@ -149,7 +150,7 @@ function InnerArrayFindIndex(predicate, thisArg, array, length) {
function ArrayFindIndex(predicate, thisArg) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.findIndex");
- var array = $toObject(this);
+ var array = TO_OBJECT(this);
var length = $toInteger(array.length);
return InnerArrayFindIndex(predicate, thisArg, array, length);
@@ -187,7 +188,7 @@ function InnerArrayFill(value, start, end, array, length) {
function ArrayFill(value, start, end) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
- var array = $toObject(this);
+ var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
return InnerArrayFill(value, start, end, array, length);
@@ -205,7 +206,7 @@ function AddArrayElement(constructor, array, i, value) {
// ES6, draft 10-14-14, section 22.1.2.1
function ArrayFrom(arrayLike, mapfn, receiver) {
- var items = $toObject(arrayLike);
+ var items = TO_OBJECT(arrayLike);
var mapping = !IS_UNDEFINED(mapfn);
if (mapping) {
@@ -215,7 +216,7 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
if (IS_NULL(receiver)) {
receiver = UNDEFINED;
} else if (!IS_UNDEFINED(receiver)) {
- receiver = TO_OBJECT_INLINE(receiver);
+ receiver = TO_OBJECT(receiver);
}
}
}
diff --git a/deps/v8/src/harmony-atomics.js b/deps/v8/src/harmony-atomics.js
index aa81822d1e..d4e069641a 100644
--- a/deps/v8/src/harmony-atomics.js
+++ b/deps/v8/src/harmony-atomics.js
@@ -12,37 +12,53 @@
// Imports
var GlobalObject = global.Object;
+var MathMax;
+var ToNumber;
+
+utils.Import(function(from) {
+ MathMax = from.MathMax;
+ ToNumber = from.ToNumber;
+});
// -------------------------------------------------------------------
function CheckSharedTypedArray(sta) {
- if (!%_IsSharedTypedArray(sta)) {
+ if (!%IsSharedTypedArray(sta)) {
throw MakeTypeError(kNotSharedTypedArray, sta);
}
}
function CheckSharedIntegerTypedArray(ia) {
- if (!%_IsSharedIntegerTypedArray(ia)) {
+ if (!%IsSharedIntegerTypedArray(ia)) {
throw MakeTypeError(kNotIntegerSharedTypedArray, ia);
}
}
+function CheckSharedInteger32TypedArray(ia) {
+ CheckSharedIntegerTypedArray(ia);
+ if (%_ClassOf(ia) !== 'Int32Array') {
+ throw MakeTypeError(kNotInt32SharedTypedArray, ia);
+ }
+}
+
//-------------------------------------------------------------------
function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
CheckSharedTypedArray(sta);
index = $toInteger(index);
- if (index < 0 || index >= sta.length) {
+ if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
return UNDEFINED;
}
+ oldValue = ToNumber(oldValue);
+ newValue = ToNumber(newValue);
return %_AtomicsCompareExchange(sta, index, oldValue, newValue);
}
function AtomicsLoadJS(sta, index) {
CheckSharedTypedArray(sta);
index = $toInteger(index);
- if (index < 0 || index >= sta.length) {
+ if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
return UNDEFINED;
}
return %_AtomicsLoad(sta, index);
@@ -51,63 +67,70 @@ function AtomicsLoadJS(sta, index) {
function AtomicsStoreJS(sta, index, value) {
CheckSharedTypedArray(sta);
index = $toInteger(index);
- if (index < 0 || index >= sta.length) {
+ if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
return UNDEFINED;
}
+ value = ToNumber(value);
return %_AtomicsStore(sta, index, value);
}
function AtomicsAddJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
index = $toInteger(index);
- if (index < 0 || index >= ia.length) {
+ if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
+ value = ToNumber(value);
return %_AtomicsAdd(ia, index, value);
}
function AtomicsSubJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
index = $toInteger(index);
- if (index < 0 || index >= ia.length) {
+ if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
+ value = ToNumber(value);
return %_AtomicsSub(ia, index, value);
}
function AtomicsAndJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
index = $toInteger(index);
- if (index < 0 || index >= ia.length) {
+ if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
+ value = ToNumber(value);
return %_AtomicsAnd(ia, index, value);
}
function AtomicsOrJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
index = $toInteger(index);
- if (index < 0 || index >= ia.length) {
+ if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
+ value = ToNumber(value);
return %_AtomicsOr(ia, index, value);
}
function AtomicsXorJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
index = $toInteger(index);
- if (index < 0 || index >= ia.length) {
+ if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
+ value = ToNumber(value);
return %_AtomicsXor(ia, index, value);
}
function AtomicsExchangeJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
index = $toInteger(index);
- if (index < 0 || index >= ia.length) {
+ if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
+ value = ToNumber(value);
return %_AtomicsExchange(ia, index, value);
}
@@ -115,6 +138,50 @@ function AtomicsIsLockFreeJS(size) {
return %_AtomicsIsLockFree(size);
}
+// Futexes
+
+function AtomicsFutexWaitJS(ia, index, value, timeout) {
+ CheckSharedInteger32TypedArray(ia);
+ index = $toInteger(index);
+ if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
+ return UNDEFINED;
+ }
+ if (IS_UNDEFINED(timeout)) {
+ timeout = INFINITY;
+ } else {
+ timeout = ToNumber(timeout);
+ if (NUMBER_IS_NAN(timeout)) {
+ timeout = INFINITY;
+ } else {
+ timeout = MathMax(0, timeout);
+ }
+ }
+ return %AtomicsFutexWait(ia, index, value, timeout);
+}
+
+function AtomicsFutexWakeJS(ia, index, count) {
+ CheckSharedInteger32TypedArray(ia);
+ index = $toInteger(index);
+ if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
+ return UNDEFINED;
+ }
+ count = MathMax(0, $toInteger(count));
+ return %AtomicsFutexWake(ia, index, count);
+}
+
+function AtomicsFutexWakeOrRequeueJS(ia, index1, count, value, index2) {
+ CheckSharedInteger32TypedArray(ia);
+ index1 = $toInteger(index1);
+ count = MathMax(0, $toInteger(count));
+ value = TO_INT32(value);
+ index2 = $toInteger(index2);
+ if (index1 < 0 || index1 >= %_TypedArrayGetLength(ia) ||
+ index2 < 0 || index2 >= %_TypedArrayGetLength(ia)) {
+ return UNDEFINED;
+ }
+ return %AtomicsFutexWakeOrRequeue(ia, index1, count, value, index2);
+}
+
// -------------------------------------------------------------------
function AtomicsConstructor() {}
@@ -127,6 +194,13 @@ var Atomics = new AtomicsConstructor();
%AddNamedProperty(Atomics, symbolToStringTag, "Atomics", READ_ONLY | DONT_ENUM);
+// These must match the values in src/futex-emulation.h
+utils.InstallConstants(Atomics, [
+ "OK", 0,
+ "NOTEQUAL", -1,
+ "TIMEDOUT", -2,
+]);
+
utils.InstallFunctions(Atomics, DONT_ENUM, [
"compareExchange", AtomicsCompareExchangeJS,
"load", AtomicsLoadJS,
@@ -138,6 +212,9 @@ utils.InstallFunctions(Atomics, DONT_ENUM, [
"xor", AtomicsXorJS,
"exchange", AtomicsExchangeJS,
"isLockFree", AtomicsIsLockFreeJS,
+ "futexWait", AtomicsFutexWaitJS,
+ "futexWake", AtomicsFutexWakeJS,
+ "futexWakeOrRequeue", AtomicsFutexWakeOrRequeueJS,
]);
})
diff --git a/deps/v8/src/harmony-object-observe.js b/deps/v8/src/harmony-object-observe.js
new file mode 100644
index 0000000000..44006cd2e9
--- /dev/null
+++ b/deps/v8/src/harmony-object-observe.js
@@ -0,0 +1,14 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+utils.InstallFunctions(global.Object, DONT_ENUM, $observeObjectMethods);
+utils.InstallFunctions(global.Array, DONT_ENUM, $observeArrayMethods);
+
+})
diff --git a/deps/v8/src/harmony-object.js b/deps/v8/src/harmony-object.js
index 382f7f4252..12f25552e4 100644
--- a/deps/v8/src/harmony-object.js
+++ b/deps/v8/src/harmony-object.js
@@ -13,7 +13,6 @@
// Imports
var GlobalObject = global.Object;
-
var OwnPropertyKeys;
utils.Import(function(from) {
@@ -24,7 +23,7 @@ utils.Import(function(from) {
// ES6, draft 04-03-15, section 19.1.2.1
function ObjectAssign(target, sources) {
- var to = TO_OBJECT_INLINE(target);
+ var to = TO_OBJECT(target);
var argsLen = %_ArgumentsLength();
if (argsLen < 2) return to;
@@ -34,7 +33,7 @@ function ObjectAssign(target, sources) {
continue;
}
- var from = TO_OBJECT_INLINE(nextSource);
+ var from = TO_OBJECT(nextSource);
var keys = OwnPropertyKeys(from);
var len = keys.length;
diff --git a/deps/v8/src/harmony-regexp.js b/deps/v8/src/harmony-regexp.js
index f4e1cb0f3f..150716744d 100644
--- a/deps/v8/src/harmony-regexp.js
+++ b/deps/v8/src/harmony-regexp.js
@@ -8,7 +8,15 @@
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalRegExp = global.RegExp;
+var ToString;
+
+utils.Import(function(from) {
+ ToString = from.ToString;
+});
// -------------------------------------------------------------------
@@ -16,7 +24,7 @@ var GlobalRegExp = global.RegExp;
// + https://bugs.ecmascript.org/show_bug.cgi?id=3423
function RegExpGetFlags() {
if (!IS_SPEC_OBJECT(this)) {
- throw MakeTypeError(kFlagsGetterNonObject, $toString(this));
+ throw MakeTypeError(kFlagsGetterNonObject, ToString(this));
}
var result = '';
if (this.global) result += 'g';
diff --git a/deps/v8/src/harmony-simd.js b/deps/v8/src/harmony-simd.js
new file mode 100644
index 0000000000..3cc18c2e90
--- /dev/null
+++ b/deps/v8/src/harmony-simd.js
@@ -0,0 +1,682 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var GlobalSIMD = global.SIMD;
+
+macro SIMD_FLOAT_TYPES(FUNCTION)
+FUNCTION(Float32x4, float32x4, 4)
+endmacro
+
+macro SIMD_INT_TYPES(FUNCTION)
+FUNCTION(Int32x4, int32x4, 4)
+FUNCTION(Int16x8, int16x8, 8)
+FUNCTION(Int8x16, int8x16, 16)
+endmacro
+
+macro SIMD_BOOL_TYPES(FUNCTION)
+FUNCTION(Bool32x4, bool32x4, 4)
+FUNCTION(Bool16x8, bool16x8, 8)
+FUNCTION(Bool8x16, bool8x16, 16)
+endmacro
+
+macro SIMD_ALL_TYPES(FUNCTION)
+SIMD_FLOAT_TYPES(FUNCTION)
+SIMD_INT_TYPES(FUNCTION)
+SIMD_BOOL_TYPES(FUNCTION)
+endmacro
+
+macro DECLARE_GLOBALS(NAME, TYPE, LANES)
+var GlobalNAME = GlobalSIMD.NAME;
+endmacro
+
+SIMD_ALL_TYPES(DECLARE_GLOBALS)
+
+macro DECLARE_COMMON_FUNCTIONS(NAME, TYPE, LANES)
+function NAMECheckJS(a) {
+ return %NAMECheck(a);
+}
+
+function NAMEToString() {
+ if (typeof(this) !== 'TYPE' && %_ClassOf(this) !== 'NAME') {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "NAME.prototype.toString", this);
+ }
+ var value = %_ValueOf(this);
+ var str = "SIMD.NAME(";
+ str += %NAMEExtractLane(value, 0);
+ for (var i = 1; i < LANES; i++) {
+ str += ", " + %NAMEExtractLane(value, i);
+ }
+ return str + ")";
+}
+
+function NAMEToLocaleString() {
+ if (typeof(this) !== 'TYPE' && %_ClassOf(this) !== 'NAME') {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "NAME.prototype.toLocaleString", this);
+ }
+ var value = %_ValueOf(this);
+ var str = "SIMD.NAME(";
+ str += %NAMEExtractLane(value, 0).toLocaleString();
+ for (var i = 1; i < LANES; i++) {
+ str += ", " + %NAMEExtractLane(value, i).toLocaleString();
+ }
+ return str + ")";
+}
+
+function NAMEValueOf() {
+ if (typeof(this) !== 'TYPE' && %_ClassOf(this) !== 'NAME') {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "NAME.prototype.valueOf", this);
+ }
+ return %_ValueOf(this);
+}
+
+function NAMEExtractLaneJS(instance, lane) {
+ return %NAMEExtractLane(instance, lane);
+}
+
+function NAMEEqualJS(a, b) {
+ return %NAMEEqual(a, b);
+}
+
+function NAMENotEqualJS(a, b) {
+ return %NAMENotEqual(a, b);
+}
+endmacro
+
+SIMD_ALL_TYPES(DECLARE_COMMON_FUNCTIONS)
+
+macro DECLARE_INT_FUNCTIONS(NAME, TYPE, LANES)
+function NAMEShiftLeftByScalarJS(instance, shift) {
+ return %NAMEShiftLeftByScalar(instance, shift);
+}
+
+function NAMEShiftRightLogicalByScalarJS(instance, shift) {
+ return %NAMEShiftRightLogicalByScalar(instance, shift);
+}
+
+function NAMEShiftRightArithmeticByScalarJS(instance, shift) {
+ return %NAMEShiftRightArithmeticByScalar(instance, shift);
+}
+endmacro
+
+SIMD_INT_TYPES(DECLARE_INT_FUNCTIONS)
+
+macro DECLARE_BOOL_FUNCTIONS(NAME, TYPE, LANES)
+function NAMEReplaceLaneJS(instance, lane, value) {
+ return %NAMEReplaceLane(instance, lane, value);
+}
+
+function NAMEAnyTrueJS(s) {
+ return %NAMEAnyTrue(s);
+}
+
+function NAMEAllTrueJS(s) {
+ return %NAMEAllTrue(s);
+}
+endmacro
+
+SIMD_BOOL_TYPES(DECLARE_BOOL_FUNCTIONS)
+
+macro SIMD_UNSIGNED_INT_TYPES(FUNCTION)
+FUNCTION(Int16x8)
+FUNCTION(Int8x16)
+endmacro
+
+macro DECLARE_UNSIGNED_INT_FUNCTIONS(NAME)
+function NAMEUnsignedExtractLaneJS(instance, lane) {
+ return %NAMEUnsignedExtractLane(instance, lane);
+}
+endmacro
+
+SIMD_UNSIGNED_INT_TYPES(DECLARE_UNSIGNED_INT_FUNCTIONS)
+
+macro SIMD_NUMERIC_TYPES(FUNCTION)
+SIMD_FLOAT_TYPES(FUNCTION)
+SIMD_INT_TYPES(FUNCTION)
+endmacro
+
+macro DECLARE_NUMERIC_FUNCTIONS(NAME, TYPE, LANES)
+function NAMEReplaceLaneJS(instance, lane, value) {
+ return %NAMEReplaceLane(instance, lane, TO_NUMBER_INLINE(value));
+}
+
+function NAMESelectJS(selector, a, b) {
+ return %NAMESelect(selector, a, b);
+}
+
+function NAMENegJS(a) {
+ return %NAMENeg(a);
+}
+
+function NAMEAddJS(a, b) {
+ return %NAMEAdd(a, b);
+}
+
+function NAMESubJS(a, b) {
+ return %NAMESub(a, b);
+}
+
+function NAMEMulJS(a, b) {
+ return %NAMEMul(a, b);
+}
+
+function NAMEMinJS(a, b) {
+ return %NAMEMin(a, b);
+}
+
+function NAMEMaxJS(a, b) {
+ return %NAMEMax(a, b);
+}
+
+function NAMELessThanJS(a, b) {
+ return %NAMELessThan(a, b);
+}
+
+function NAMELessThanOrEqualJS(a, b) {
+ return %NAMELessThanOrEqual(a, b);
+}
+
+function NAMEGreaterThanJS(a, b) {
+ return %NAMEGreaterThan(a, b);
+}
+
+function NAMEGreaterThanOrEqualJS(a, b) {
+ return %NAMEGreaterThanOrEqual(a, b);
+}
+endmacro
+
+SIMD_NUMERIC_TYPES(DECLARE_NUMERIC_FUNCTIONS)
+
+macro SIMD_LOGICAL_TYPES(FUNCTION)
+SIMD_INT_TYPES(FUNCTION)
+SIMD_BOOL_TYPES(FUNCTION)
+endmacro
+
+macro DECLARE_LOGICAL_FUNCTIONS(NAME, TYPE, LANES)
+function NAMEAndJS(a, b) {
+ return %NAMEAnd(a, b);
+}
+
+function NAMEOrJS(a, b) {
+ return %NAMEOr(a, b);
+}
+
+function NAMEXorJS(a, b) {
+ return %NAMEXor(a, b);
+}
+
+function NAMENotJS(a) {
+ return %NAMENot(a);
+}
+endmacro
+
+SIMD_LOGICAL_TYPES(DECLARE_LOGICAL_FUNCTIONS)
+
+macro SIMD_FROM_TYPES(FUNCTION)
+FUNCTION(Float32x4, Int32x4)
+FUNCTION(Int32x4, Float32x4)
+endmacro
+
+macro DECLARE_FROM_FUNCTIONS(TO, FROM)
+function TOFromFROMJS(a) {
+ return %TOFromFROM(a);
+}
+endmacro
+
+SIMD_FROM_TYPES(DECLARE_FROM_FUNCTIONS)
+
+macro SIMD_FROM_BITS_TYPES(FUNCTION)
+FUNCTION(Float32x4, Int32x4)
+FUNCTION(Float32x4, Int16x8)
+FUNCTION(Float32x4, Int8x16)
+FUNCTION(Int32x4, Float32x4)
+FUNCTION(Int32x4, Int16x8)
+FUNCTION(Int32x4, Int8x16)
+FUNCTION(Int16x8, Float32x4)
+FUNCTION(Int16x8, Int32x4)
+FUNCTION(Int16x8, Int8x16)
+FUNCTION(Int8x16, Float32x4)
+FUNCTION(Int8x16, Int32x4)
+FUNCTION(Int8x16, Int16x8)
+endmacro
+
+macro DECLARE_FROM_BITS_FUNCTIONS(TO, FROM)
+function TOFromFROMBitsJS(a) {
+ return %TOFromFROMBits(a);
+}
+endmacro
+
+SIMD_FROM_BITS_TYPES(DECLARE_FROM_BITS_FUNCTIONS)
+
+//-------------------------------------------------------------------
+
+function Float32x4Constructor(c0, c1, c2, c3) {
+ if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Float32x4");
+ return %CreateFloat32x4(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
+ TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3));
+}
+
+
+function Float32x4Splat(s) {
+ return %CreateFloat32x4(s, s, s, s);
+}
+
+
+function Float32x4AbsJS(a) {
+ return %Float32x4Abs(a);
+}
+
+
+function Float32x4SqrtJS(a) {
+ return %Float32x4Sqrt(a);
+}
+
+
+function Float32x4RecipApproxJS(a) {
+ return %Float32x4RecipApprox(a);
+}
+
+
+function Float32x4RecipSqrtApproxJS(a) {
+ return %Float32x4RecipSqrtApprox(a);
+}
+
+
+function Float32x4DivJS(a, b) {
+ return %Float32x4Div(a, b);
+}
+
+
+function Float32x4MinNumJS(a, b) {
+ return %Float32x4MinNum(a, b);
+}
+
+
+function Float32x4MaxNumJS(a, b) {
+ return %Float32x4MaxNum(a, b);
+}
+
+
+function Float32x4SwizzleJS(a, c0, c1, c2, c3) {
+ return %Float32x4Swizzle(a, c0, c1, c2, c3);
+}
+
+
+function Float32x4ShuffleJS(a, b, c0, c1, c2, c3) {
+ return %Float32x4Shuffle(a, b, c0, c1, c2, c3);
+}
+
+
+function Int32x4Constructor(c0, c1, c2, c3) {
+ if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int32x4");
+ return %CreateInt32x4(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
+ TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3));
+}
+
+
+function Int32x4Splat(s) {
+ return %CreateInt32x4(s, s, s, s);
+}
+
+
+function Int32x4SwizzleJS(a, c0, c1, c2, c3) {
+ return %Int32x4Swizzle(a, c0, c1, c2, c3);
+}
+
+
+function Int32x4ShuffleJS(a, b, c0, c1, c2, c3) {
+ return %Int32x4Shuffle(a, b, c0, c1, c2, c3);
+}
+
+
+function Bool32x4Constructor(c0, c1, c2, c3) {
+ if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Bool32x4");
+ return %CreateBool32x4(c0, c1, c2, c3);
+}
+
+
+function Bool32x4Splat(s) {
+ return %CreateBool32x4(s, s, s, s);
+}
+
+function Bool32x4SwizzleJS(a, c0, c1, c2, c3) {
+ return %Bool32x4Swizzle(a, c0, c1, c2, c3);
+}
+
+
+function Bool32x4ShuffleJS(a, b, c0, c1, c2, c3) {
+ return %Bool32x4Shuffle(a, b, c0, c1, c2, c3);
+}
+
+
+function Int16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
+ if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int16x8");
+ return %CreateInt16x8(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
+ TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3),
+ TO_NUMBER_INLINE(c4), TO_NUMBER_INLINE(c5),
+ TO_NUMBER_INLINE(c6), TO_NUMBER_INLINE(c7));
+}
+
+
+function Int16x8Splat(s) {
+ return %CreateInt16x8(s, s, s, s, s, s, s, s);
+}
+
+
+function Int16x8SwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7) {
+ return %Int16x8Swizzle(a, c0, c1, c2, c3, c4, c5, c6, c7);
+}
+
+
+function Int16x8ShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7) {
+ return %Int16x8Shuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7);
+}
+
+
+function Bool16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
+ if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Bool16x8");
+ return %CreateBool16x8(c0, c1, c2, c3, c4, c5, c6, c7);
+}
+
+
+function Bool16x8Splat(s) {
+ return %CreateBool16x8(s, s, s, s, s, s, s, s);
+}
+
+
+function Bool16x8SwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7) {
+ return %Bool16x8Swizzle(a, c0, c1, c2, c3, c4, c5, c6, c7);
+}
+
+
+function Bool16x8ShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7) {
+ return %Bool16x8Shuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7);
+}
+
+
+function Int8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
+ c12, c13, c14, c15) {
+ if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int8x16");
+ return %CreateInt8x16(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
+ TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3),
+ TO_NUMBER_INLINE(c4), TO_NUMBER_INLINE(c5),
+ TO_NUMBER_INLINE(c6), TO_NUMBER_INLINE(c7),
+ TO_NUMBER_INLINE(c8), TO_NUMBER_INLINE(c9),
+ TO_NUMBER_INLINE(c10), TO_NUMBER_INLINE(c11),
+ TO_NUMBER_INLINE(c12), TO_NUMBER_INLINE(c13),
+ TO_NUMBER_INLINE(c14), TO_NUMBER_INLINE(c15));
+}
+
+
+function Int8x16Splat(s) {
+ return %CreateInt8x16(s, s, s, s, s, s, s, s, s, s, s, s, s, s, s, s);
+}
+
+
+function Int8x16SwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
+ c12, c13, c14, c15) {
+ return %Int8x16Swizzle(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
+ c12, c13, c14, c15);
+}
+
+
+function Int8x16ShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
+ c11, c12, c13, c14, c15) {
+ return %Int8x16Shuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
+ c11, c12, c13, c14, c15);
+}
+
+
+function Bool8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
+ c12, c13, c14, c15) {
+ if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Bool8x16");
+ return %CreateBool8x16(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12,
+ c13, c14, c15);
+}
+
+
+function Bool8x16Splat(s) {
+ return %CreateBool8x16(s, s, s, s, s, s, s, s, s, s, s, s, s, s, s, s);
+}
+
+
+function Bool8x16SwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
+ c12, c13, c14, c15) {
+ return %Bool8x16Swizzle(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
+ c12, c13, c14, c15);
+}
+
+
+function Bool8x16ShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
+ c11, c12, c13, c14, c15) {
+ return %Bool8x16Shuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
+ c11, c12, c13, c14, c15);
+}
+
+
+%AddNamedProperty(GlobalSIMD, symbolToStringTag, 'SIMD', READ_ONLY | DONT_ENUM);
+
+macro SETUP_SIMD_TYPE(NAME, TYPE, LANES)
+%SetCode(GlobalNAME, NAMEConstructor);
+%FunctionSetPrototype(GlobalNAME, {});
+%AddNamedProperty(GlobalNAME.prototype, 'constructor', GlobalNAME,
+ DONT_ENUM);
+%AddNamedProperty(GlobalNAME.prototype, symbolToStringTag, 'NAME',
+ DONT_ENUM | READ_ONLY);
+utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
+ 'toLocaleString', NAMEToLocaleString,
+ 'toString', NAMEToString,
+ 'valueOf', NAMEValueOf,
+]);
+endmacro
+
+SIMD_ALL_TYPES(SETUP_SIMD_TYPE)
+
+//-------------------------------------------------------------------
+
+utils.InstallFunctions(GlobalFloat32x4, DONT_ENUM, [
+ 'splat', Float32x4Splat,
+ 'check', Float32x4CheckJS,
+ 'extractLane', Float32x4ExtractLaneJS,
+ 'replaceLane', Float32x4ReplaceLaneJS,
+ 'neg', Float32x4NegJS,
+ 'abs', Float32x4AbsJS,
+ 'sqrt', Float32x4SqrtJS,
+ 'reciprocalApproximation', Float32x4RecipApproxJS,
+ 'reciprocalSqrtApproximation', Float32x4RecipSqrtApproxJS,
+ 'add', Float32x4AddJS,
+ 'sub', Float32x4SubJS,
+ 'mul', Float32x4MulJS,
+ 'div', Float32x4DivJS,
+ 'min', Float32x4MinJS,
+ 'max', Float32x4MaxJS,
+ 'minNum', Float32x4MinNumJS,
+ 'maxNum', Float32x4MaxNumJS,
+ 'lessThan', Float32x4LessThanJS,
+ 'lessThanOrEqual', Float32x4LessThanOrEqualJS,
+ 'greaterThan', Float32x4GreaterThanJS,
+ 'greaterThanOrEqual', Float32x4GreaterThanOrEqualJS,
+ 'equal', Float32x4EqualJS,
+ 'notEqual', Float32x4NotEqualJS,
+ 'select', Float32x4SelectJS,
+ 'swizzle', Float32x4SwizzleJS,
+ 'shuffle', Float32x4ShuffleJS,
+ 'fromInt32x4', Float32x4FromInt32x4JS,
+ 'fromInt32x4Bits', Float32x4FromInt32x4BitsJS,
+ 'fromInt16x8Bits', Float32x4FromInt16x8BitsJS,
+ 'fromInt8x16Bits', Float32x4FromInt8x16BitsJS,
+]);
+
+utils.InstallFunctions(GlobalInt32x4, DONT_ENUM, [
+ 'splat', Int32x4Splat,
+ 'check', Int32x4CheckJS,
+ 'extractLane', Int32x4ExtractLaneJS,
+ 'replaceLane', Int32x4ReplaceLaneJS,
+ 'neg', Int32x4NegJS,
+ 'add', Int32x4AddJS,
+ 'sub', Int32x4SubJS,
+ 'mul', Int32x4MulJS,
+ 'min', Int32x4MinJS,
+ 'max', Int32x4MaxJS,
+ 'and', Int32x4AndJS,
+ 'or', Int32x4OrJS,
+ 'xor', Int32x4XorJS,
+ 'not', Int32x4NotJS,
+ 'shiftLeftByScalar', Int32x4ShiftLeftByScalarJS,
+ 'shiftRightLogicalByScalar', Int32x4ShiftRightLogicalByScalarJS,
+ 'shiftRightArithmeticByScalar', Int32x4ShiftRightArithmeticByScalarJS,
+ 'lessThan', Int32x4LessThanJS,
+ 'lessThanOrEqual', Int32x4LessThanOrEqualJS,
+ 'greaterThan', Int32x4GreaterThanJS,
+ 'greaterThanOrEqual', Int32x4GreaterThanOrEqualJS,
+ 'equal', Int32x4EqualJS,
+ 'notEqual', Int32x4NotEqualJS,
+ 'select', Int32x4SelectJS,
+ 'swizzle', Int32x4SwizzleJS,
+ 'shuffle', Int32x4ShuffleJS,
+ 'fromFloat32x4', Int32x4FromFloat32x4JS,
+ 'fromFloat32x4Bits', Int32x4FromFloat32x4BitsJS,
+ 'fromInt16x8Bits', Int32x4FromInt16x8BitsJS,
+ 'fromInt8x16Bits', Int32x4FromInt8x16BitsJS,
+]);
+
+utils.InstallFunctions(GlobalBool32x4, DONT_ENUM, [
+ 'splat', Bool32x4Splat,
+ 'check', Bool32x4CheckJS,
+ 'extractLane', Bool32x4ExtractLaneJS,
+ 'replaceLane', Bool32x4ReplaceLaneJS,
+ 'and', Bool32x4AndJS,
+ 'or', Bool32x4OrJS,
+ 'xor', Bool32x4XorJS,
+ 'not', Bool32x4NotJS,
+ 'anyTrue', Bool32x4AnyTrueJS,
+ 'allTrue', Bool32x4AllTrueJS,
+ 'equal', Bool32x4EqualJS,
+ 'notEqual', Bool32x4NotEqualJS,
+ 'swizzle', Bool32x4SwizzleJS,
+ 'shuffle', Bool32x4ShuffleJS,
+]);
+
+utils.InstallFunctions(GlobalInt16x8, DONT_ENUM, [
+ 'splat', Int16x8Splat,
+ 'check', Int16x8CheckJS,
+ 'extractLane', Int16x8ExtractLaneJS,
+ 'unsignedExtractLane', Int16x8UnsignedExtractLaneJS,
+ 'replaceLane', Int16x8ReplaceLaneJS,
+ 'neg', Int16x8NegJS,
+ 'add', Int16x8AddJS,
+ 'sub', Int16x8SubJS,
+ 'mul', Int16x8MulJS,
+ 'min', Int16x8MinJS,
+ 'max', Int16x8MaxJS,
+ 'and', Int16x8AndJS,
+ 'or', Int16x8OrJS,
+ 'xor', Int16x8XorJS,
+ 'not', Int16x8NotJS,
+ 'shiftLeftByScalar', Int16x8ShiftLeftByScalarJS,
+ 'shiftRightLogicalByScalar', Int16x8ShiftRightLogicalByScalarJS,
+ 'shiftRightArithmeticByScalar', Int16x8ShiftRightArithmeticByScalarJS,
+ 'lessThan', Int16x8LessThanJS,
+ 'lessThanOrEqual', Int16x8LessThanOrEqualJS,
+ 'greaterThan', Int16x8GreaterThanJS,
+ 'greaterThanOrEqual', Int16x8GreaterThanOrEqualJS,
+ 'equal', Int16x8EqualJS,
+ 'notEqual', Int16x8NotEqualJS,
+ 'select', Int16x8SelectJS,
+ 'swizzle', Int16x8SwizzleJS,
+ 'shuffle', Int16x8ShuffleJS,
+ 'fromFloat32x4Bits', Int16x8FromFloat32x4BitsJS,
+ 'fromInt32x4Bits', Int16x8FromInt32x4BitsJS,
+ 'fromInt8x16Bits', Int16x8FromInt8x16BitsJS,
+]);
+
+utils.InstallFunctions(GlobalBool16x8, DONT_ENUM, [
+ 'splat', Bool16x8Splat,
+ 'check', Bool16x8CheckJS,
+ 'extractLane', Bool16x8ExtractLaneJS,
+ 'replaceLane', Bool16x8ReplaceLaneJS,
+ 'and', Bool16x8AndJS,
+ 'or', Bool16x8OrJS,
+ 'xor', Bool16x8XorJS,
+ 'not', Bool16x8NotJS,
+ 'anyTrue', Bool16x8AnyTrueJS,
+ 'allTrue', Bool16x8AllTrueJS,
+ 'equal', Bool16x8EqualJS,
+ 'notEqual', Bool16x8NotEqualJS,
+ 'swizzle', Bool16x8SwizzleJS,
+ 'shuffle', Bool16x8ShuffleJS,
+]);
+
+utils.InstallFunctions(GlobalInt8x16, DONT_ENUM, [
+ 'splat', Int8x16Splat,
+ 'check', Int8x16CheckJS,
+ 'extractLane', Int8x16ExtractLaneJS,
+ 'unsignedExtractLane', Int8x16UnsignedExtractLaneJS,
+ 'replaceLane', Int8x16ReplaceLaneJS,
+ 'neg', Int8x16NegJS,
+ 'add', Int8x16AddJS,
+ 'sub', Int8x16SubJS,
+ 'mul', Int8x16MulJS,
+ 'min', Int8x16MinJS,
+ 'max', Int8x16MaxJS,
+ 'and', Int8x16AndJS,
+ 'or', Int8x16OrJS,
+ 'xor', Int8x16XorJS,
+ 'not', Int8x16NotJS,
+ 'shiftLeftByScalar', Int8x16ShiftLeftByScalarJS,
+ 'shiftRightLogicalByScalar', Int8x16ShiftRightLogicalByScalarJS,
+ 'shiftRightArithmeticByScalar', Int8x16ShiftRightArithmeticByScalarJS,
+ 'lessThan', Int8x16LessThanJS,
+ 'lessThanOrEqual', Int8x16LessThanOrEqualJS,
+ 'greaterThan', Int8x16GreaterThanJS,
+ 'greaterThanOrEqual', Int8x16GreaterThanOrEqualJS,
+ 'equal', Int8x16EqualJS,
+ 'notEqual', Int8x16NotEqualJS,
+ 'select', Int8x16SelectJS,
+ 'swizzle', Int8x16SwizzleJS,
+ 'shuffle', Int8x16ShuffleJS,
+ 'fromFloat32x4Bits', Int8x16FromFloat32x4BitsJS,
+ 'fromInt32x4Bits', Int8x16FromInt32x4BitsJS,
+ 'fromInt16x8Bits', Int8x16FromInt16x8BitsJS,
+]);
+
+utils.InstallFunctions(GlobalBool8x16, DONT_ENUM, [
+ 'splat', Bool8x16Splat,
+ 'check', Bool8x16CheckJS,
+ 'extractLane', Bool8x16ExtractLaneJS,
+ 'replaceLane', Bool8x16ReplaceLaneJS,
+ 'and', Bool8x16AndJS,
+ 'or', Bool8x16OrJS,
+ 'xor', Bool8x16XorJS,
+ 'not', Bool8x16NotJS,
+ 'anyTrue', Bool8x16AnyTrueJS,
+ 'allTrue', Bool8x16AllTrueJS,
+ 'equal', Bool8x16EqualJS,
+ 'notEqual', Bool8x16NotEqualJS,
+ 'swizzle', Bool8x16SwizzleJS,
+ 'shuffle', Bool8x16ShuffleJS,
+]);
+
+utils.Export(function(to) {
+ to.Float32x4ToString = Float32x4ToString;
+ to.Int32x4ToString = Int32x4ToString;
+ to.Bool32x4ToString = Bool32x4ToString;
+ to.Int16x8ToString = Int16x8ToString;
+ to.Bool16x8ToString = Bool16x8ToString;
+ to.Int8x16ToString = Int8x16ToString;
+ to.Bool8x16ToString = Bool8x16ToString;
+});
+
+})
diff --git a/deps/v8/src/harmony-typedarray.js b/deps/v8/src/harmony-typedarray.js
index b9cc798ad2..cd220dae83 100644
--- a/deps/v8/src/harmony-typedarray.js
+++ b/deps/v8/src/harmony-typedarray.js
@@ -44,13 +44,14 @@ var InnerArrayIndexOf;
var InnerArrayJoin;
var InnerArrayLastIndexOf;
var InnerArrayMap;
-var InnerArrayReverse;
var InnerArraySome;
var InnerArraySort;
var InnerArrayToLocaleString;
var IsNaN;
var MathMax;
var MathMin;
+var PackedArrayReverse;
+var ToNumber;
utils.Import(function(from) {
ArrayFrom = from.ArrayFrom;
@@ -68,13 +69,14 @@ utils.Import(function(from) {
InnerArrayMap = from.InnerArrayMap;
InnerArrayReduce = from.InnerArrayReduce;
InnerArrayReduceRight = from.InnerArrayReduceRight;
- InnerArrayReverse = from.InnerArrayReverse;
InnerArraySome = from.InnerArraySome;
InnerArraySort = from.InnerArraySort;
InnerArrayToLocaleString = from.InnerArrayToLocaleString;
IsNaN = from.IsNaN;
MathMax = from.MathMax;
MathMin = from.MathMin;
+ PackedArrayReverse = from.PackedArrayReverse;
+ ToNumber = from.ToNumber;
});
// -------------------------------------------------------------------
@@ -179,7 +181,7 @@ function TypedArrayReverse() {
var length = %_TypedArrayGetLength(this);
- return InnerArrayReverse(this, length);
+ return PackedArrayReverse(this, length);
}
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 3f11be4ce4..68d94ccbd9 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -186,5 +186,8 @@ void HeapProfiler::ClearHeapObjectMap() {
}
+Heap* HeapProfiler::heap() const { return ids_->heap(); }
+
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h
index 68e13656c6..16dd08a265 100644
--- a/deps/v8/src/heap-profiler.h
+++ b/deps/v8/src/heap-profiler.h
@@ -5,14 +5,17 @@
#ifndef V8_HEAP_PROFILER_H_
#define V8_HEAP_PROFILER_H_
-#include "src/heap-snapshot-generator-inl.h"
+#include "src/base/smart-pointers.h"
#include "src/isolate.h"
-#include "src/smart-pointers.h"
namespace v8 {
namespace internal {
+// Forward declarations.
+class AllocationTracker;
+class HeapObjectsMap;
class HeapSnapshot;
+class StringsStorage;
class HeapProfiler {
public:
@@ -63,14 +66,14 @@ class HeapProfiler {
void ClearHeapObjectMap();
private:
- Heap* heap() const { return ids_->heap(); }
+ Heap* heap() const;
// Mapping from HeapObject addresses to objects' uids.
- SmartPointer<HeapObjectsMap> ids_;
+ base::SmartPointer<HeapObjectsMap> ids_;
List<HeapSnapshot*> snapshots_;
- SmartPointer<StringsStorage> names_;
+ base::SmartPointer<StringsStorage> names_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
- SmartPointer<AllocationTracker> allocation_tracker_;
+ base::SmartPointer<AllocationTracker> allocation_tracker_;
bool is_tracking_object_moves_;
};
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index f1bdc71cca..f9c235ed94 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -9,7 +9,7 @@
#include "src/allocation-tracker.h"
#include "src/code-stubs.h"
#include "src/conversions.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/heap-profiler.h"
#include "src/types.h"
@@ -856,14 +856,12 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
} else if (object->IsContext()) {
return AddEntry(object, HeapEntry::kObject, "system / Context");
- } else if (object->IsFixedArray() ||
- object->IsFixedDoubleArray() ||
- object->IsByteArray() ||
- object->IsExternalArray()) {
+ } else if (object->IsFixedArray() || object->IsFixedDoubleArray() ||
+ object->IsByteArray()) {
return AddEntry(object, HeapEntry::kArray, "");
} else if (object->IsHeapNumber()) {
return AddEntry(object, HeapEntry::kHeapNumber, "number");
- } else if (object->IsFloat32x4()) {
+ } else if (object->IsSimd128Value()) {
return AddEntry(object, HeapEntry::kSimdValue, "simd");
}
return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
@@ -1265,8 +1263,6 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, GlobalObject, global);
if (context->IsNativeContext()) {
- TagObject(context->jsfunction_result_caches(),
- "(context func. result caches)");
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
TagObject(context->runtime_context(), "(runtime context)");
TagObject(context->embedder_data(), "(context data)");
@@ -1894,18 +1890,17 @@ bool V8HeapExplorer::IterateAndExtractSinglePass() {
bool V8HeapExplorer::IsEssentialObject(Object* object) {
- return object->IsHeapObject()
- && !object->IsOddball()
- && object != heap_->empty_byte_array()
- && object != heap_->empty_fixed_array()
- && object != heap_->empty_descriptor_array()
- && object != heap_->fixed_array_map()
- && object != heap_->cell_map()
- && object != heap_->global_property_cell_map()
- && object != heap_->shared_function_info_map()
- && object != heap_->free_space_map()
- && object != heap_->one_pointer_filler_map()
- && object != heap_->two_pointer_filler_map();
+ return object->IsHeapObject() && !object->IsOddball() &&
+ object != heap_->empty_byte_array() &&
+ object != heap_->empty_bytecode_array() &&
+ object != heap_->empty_fixed_array() &&
+ object != heap_->empty_descriptor_array() &&
+ object != heap_->fixed_array_map() && object != heap_->cell_map() &&
+ object != heap_->global_property_cell_map() &&
+ object != heap_->shared_function_info_map() &&
+ object != heap_->free_space_map() &&
+ object != heap_->one_pointer_filler_map() &&
+ object != heap_->two_pointer_filler_map();
}
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h
index ed0ca89839..5693cc16c3 100644
--- a/deps/v8/src/heap-snapshot-generator.h
+++ b/deps/v8/src/heap-snapshot-generator.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_SNAPSHOT_GENERATOR_H_
#define V8_HEAP_SNAPSHOT_GENERATOR_H_
+#include "src/base/platform/time.h"
#include "src/strings-storage.h"
namespace v8 {
diff --git a/deps/v8/src/heap/OWNERS b/deps/v8/src/heap/OWNERS
index 07428b275b..a8533293e1 100644
--- a/deps/v8/src/heap/OWNERS
+++ b/deps/v8/src/heap/OWNERS
@@ -1 +1,6 @@
+set noparent
+
hpayer@chromium.org
+mlippautz@chromium.org
+mstarzinger@chromium.org
+ulan@chromium.org
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index f76c48bf9e..096412d578 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/flags.h"
#include "src/heap/gc-idle-time-handler.h"
+
+#include "src/flags.h"
#include "src/heap/gc-tracer.h"
#include "src/utils.h"
@@ -129,10 +130,7 @@ bool GCIdleTimeHandler::ShouldDoScavenge(
// We do not know the allocation throughput before the first scavenge.
// TODO(hpayer): Estimate allocation throughput before the first scavenge.
- if (new_space_allocation_throughput_in_bytes_per_ms == 0) {
- new_space_allocation_limit =
- static_cast<size_t>(new_space_size * kConservativeTimeRatio);
- } else {
+ if (new_space_allocation_throughput_in_bytes_per_ms > 0) {
// We have to trigger scavenge before we reach the end of new space.
size_t adjust_limit = new_space_allocation_throughput_in_bytes_per_ms *
kTimeUntilNextIdleEvent;
@@ -143,6 +141,13 @@ bool GCIdleTimeHandler::ShouldDoScavenge(
}
}
+ if (new_space_allocation_throughput_in_bytes_per_ms <
+ kLowAllocationThroughput) {
+ new_space_allocation_limit =
+ Min(new_space_allocation_limit,
+ static_cast<size_t>(new_space_size * kConservativeTimeRatio));
+ }
+
// The allocated new space limit to trigger a scavange has to be at least
// kMinimumNewSpaceSizeToPerformScavenge.
if (new_space_allocation_limit < kMinimumNewSpaceSizeToPerformScavenge) {
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index 8f12a446f2..ebd132e752 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -124,6 +124,10 @@ class GCIdleTimeHandler {
// no idle notification happens.
static const size_t kTimeUntilNextIdleEvent = 100;
+ // An allocation throughput below kLowAllocationThroughput bytes/ms is
+ // considered low
+ static const size_t kLowAllocationThroughput = 1000;
+
// If we haven't recorded any scavenger events yet, we use a conservative
// lower bound for the scavenger speed.
static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 6728f09bda..3b8e24b474 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -2,10 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/heap/gc-tracer.h"
+#include "src/counters.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+
namespace v8 {
namespace internal {
@@ -399,36 +402,69 @@ void GCTracer::PrintNVP() const {
PrintF("mutator=%.1f ", spent_in_mutator);
PrintF("gc=%s ", current_.TypeName(true));
- PrintF("external=%.1f ", current_.scopes[Scope::EXTERNAL]);
- PrintF("mark=%.1f ", current_.scopes[Scope::MC_MARK]);
- PrintF("sweep=%.2f ", current_.scopes[Scope::MC_SWEEP]);
- PrintF("sweepns=%.2f ", current_.scopes[Scope::MC_SWEEP_NEWSPACE]);
- PrintF("sweepos=%.2f ", current_.scopes[Scope::MC_SWEEP_OLDSPACE]);
- PrintF("sweepcode=%.2f ", current_.scopes[Scope::MC_SWEEP_CODE]);
- PrintF("sweepcell=%.2f ", current_.scopes[Scope::MC_SWEEP_CELL]);
- PrintF("sweepmap=%.2f ", current_.scopes[Scope::MC_SWEEP_MAP]);
- PrintF("evacuate=%.1f ", current_.scopes[Scope::MC_EVACUATE_PAGES]);
- PrintF("new_new=%.1f ",
- current_.scopes[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
- PrintF("root_new=%.1f ",
- current_.scopes[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
- PrintF("old_new=%.1f ",
- current_.scopes[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
- PrintF("compaction_ptrs=%.1f ",
- current_.scopes[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
- PrintF("intracompaction_ptrs=%.1f ",
- current_.scopes[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
- PrintF("misc_compaction=%.1f ",
- current_.scopes[Scope::MC_UPDATE_MISC_POINTERS]);
- PrintF("weak_closure=%.1f ", current_.scopes[Scope::MC_WEAKCLOSURE]);
- PrintF("inc_weak_closure=%.1f ",
- current_.scopes[Scope::MC_INCREMENTAL_WEAKCLOSURE]);
- PrintF("weakcollection_process=%.1f ",
- current_.scopes[Scope::MC_WEAKCOLLECTION_PROCESS]);
- PrintF("weakcollection_clear=%.1f ",
- current_.scopes[Scope::MC_WEAKCOLLECTION_CLEAR]);
- PrintF("weakcollection_abort=%.1f ",
- current_.scopes[Scope::MC_WEAKCOLLECTION_ABORT]);
+ switch (current_.type) {
+ case Event::SCAVENGER:
+ PrintF("scavenge=%.2f ", current_.scopes[Scope::SCAVENGER_SCAVENGE]);
+ PrintF("old_new=%.2f ",
+ current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS]);
+ PrintF("weak=%.2f ", current_.scopes[Scope::SCAVENGER_WEAK]);
+ PrintF("roots=%.2f ", current_.scopes[Scope::SCAVENGER_ROOTS]);
+ PrintF("code=%.2f ",
+ current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES]);
+ PrintF("semispace=%.2f ", current_.scopes[Scope::SCAVENGER_SEMISPACE]);
+ PrintF("object_groups=%.2f ",
+ current_.scopes[Scope::SCAVENGER_OBJECT_GROUPS]);
+ PrintF("steps_count=%d ", current_.incremental_marking_steps);
+ PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
+ PrintF("scavenge_throughput=%" V8_PTR_PREFIX "d ",
+ ScavengeSpeedInBytesPerMillisecond());
+ break;
+ case Event::MARK_COMPACTOR:
+ case Event::INCREMENTAL_MARK_COMPACTOR:
+ PrintF("external=%.1f ", current_.scopes[Scope::EXTERNAL]);
+ PrintF("mark=%.1f ", current_.scopes[Scope::MC_MARK]);
+ PrintF("sweep=%.2f ", current_.scopes[Scope::MC_SWEEP]);
+ PrintF("sweepns=%.2f ", current_.scopes[Scope::MC_SWEEP_NEWSPACE]);
+ PrintF("sweepos=%.2f ", current_.scopes[Scope::MC_SWEEP_OLDSPACE]);
+ PrintF("sweepcode=%.2f ", current_.scopes[Scope::MC_SWEEP_CODE]);
+ PrintF("sweepcell=%.2f ", current_.scopes[Scope::MC_SWEEP_CELL]);
+ PrintF("sweepmap=%.2f ", current_.scopes[Scope::MC_SWEEP_MAP]);
+ PrintF("rescan_lo=%.2f ",
+ current_.scopes[Scope::MC_RESCAN_LARGE_OBJECTS]);
+ PrintF("evacuate=%.1f ", current_.scopes[Scope::MC_EVACUATE_PAGES]);
+ PrintF("new_new=%.1f ",
+ current_.scopes[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
+ PrintF("root_new=%.1f ",
+ current_.scopes[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
+ PrintF("old_new=%.1f ",
+ current_.scopes[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
+ PrintF("compaction_ptrs=%.1f ",
+ current_.scopes[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
+ PrintF("intracompaction_ptrs=%.1f ",
+ current_.scopes[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
+ PrintF("misc_compaction=%.1f ",
+ current_.scopes[Scope::MC_UPDATE_MISC_POINTERS]);
+ PrintF("weak_closure=%.1f ", current_.scopes[Scope::MC_WEAKCLOSURE]);
+ PrintF("inc_weak_closure=%.1f ",
+ current_.scopes[Scope::MC_INCREMENTAL_WEAKCLOSURE]);
+ PrintF("weakcollection_process=%.1f ",
+ current_.scopes[Scope::MC_WEAKCOLLECTION_PROCESS]);
+ PrintF("weakcollection_clear=%.1f ",
+ current_.scopes[Scope::MC_WEAKCOLLECTION_CLEAR]);
+ PrintF("weakcollection_abort=%.1f ",
+ current_.scopes[Scope::MC_WEAKCOLLECTION_ABORT]);
+
+ PrintF("steps_count=%d ", current_.incremental_marking_steps);
+ PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
+ PrintF("longest_step=%.1f ", current_.longest_incremental_marking_step);
+ PrintF("incremental_marking_throughput=%" V8_PTR_PREFIX "d ",
+ IncrementalMarkingSpeedInBytesPerMillisecond());
+ break;
+ case Event::START:
+ break;
+ default:
+ UNREACHABLE();
+ }
PrintF("total_size_before=%" V8_PTR_PREFIX "d ", current_.start_object_size);
PrintF("total_size_after=%" V8_PTR_PREFIX "d ", current_.end_object_size);
@@ -452,19 +488,6 @@ void GCTracer::PrintNVP() const {
NewSpaceAllocationThroughputInBytesPerMillisecond());
PrintF("context_disposal_rate=%.1f ", ContextDisposalRateInMilliseconds());
- if (current_.type == Event::SCAVENGER) {
- PrintF("steps_count=%d ", current_.incremental_marking_steps);
- PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
- PrintF("scavenge_throughput=%" V8_PTR_PREFIX "d ",
- ScavengeSpeedInBytesPerMillisecond());
- } else {
- PrintF("steps_count=%d ", current_.incremental_marking_steps);
- PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
- PrintF("longest_step=%.1f ", current_.longest_incremental_marking_step);
- PrintF("incremental_marking_throughput=%" V8_PTR_PREFIX "d ",
- IncrementalMarkingSpeedInBytesPerMillisecond());
- }
-
PrintF("\n");
}
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index e26fc898f9..7572059dc9 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -6,6 +6,7 @@
#define V8_HEAP_GC_TRACER_H_
#include "src/base/platform/platform.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -104,6 +105,7 @@ class GCTracer {
MC_SWEEP_CODE,
MC_SWEEP_CELL,
MC_SWEEP_MAP,
+ MC_RESCAN_LARGE_OBJECTS,
MC_EVACUATE_PAGES,
MC_UPDATE_NEW_TO_NEW_POINTERS,
MC_UPDATE_ROOT_TO_NEW_POINTERS,
@@ -117,6 +119,13 @@ class GCTracer {
MC_WEAKCOLLECTION_CLEAR,
MC_WEAKCOLLECTION_ABORT,
MC_FLUSH_CODE,
+ SCAVENGER_CODE_FLUSH_CANDIDATES,
+ SCAVENGER_OBJECT_GROUPS,
+ SCAVENGER_OLD_TO_NEW_POINTERS,
+ SCAVENGER_ROOTS,
+ SCAVENGER_SCAVENGE,
+ SCAVENGER_SEMISPACE,
+ SCAVENGER_WEAK,
NUMBER_OF_SCOPES
};
@@ -410,13 +419,13 @@ class GCTracer {
// Returns 0 if no allocation events have been recorded.
size_t AllocationThroughputInBytesPerMillisecond(double time_ms) const;
- // Allocation throughput in heap in bytes/milliseconds in
- // the last five seconds.
+ // Allocation throughput in heap in bytes/milliseconds in the last
+ // kThroughputTimeFrameMs seconds.
// Returns 0 if no allocation events have been recorded.
size_t CurrentAllocationThroughputInBytesPerMillisecond() const;
- // Allocation throughput in old generation in bytes/milliseconds in
- // the last five seconds.
+ // Allocation throughput in old generation in bytes/milliseconds in the last
+ // kThroughputTimeFrameMs seconds.
// Returns 0 if no allocation events have been recorded.
size_t CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index fdb1d7345b..aecdd40988 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -8,12 +8,16 @@
#include <cmath>
#include "src/base/platform/platform.h"
+#include "src/counters.h"
#include "src/heap/heap.h"
+#include "src/heap/incremental-marking-inl.h"
+#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
#include "src/heap/store-buffer-inl.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/list-inl.h"
+#include "src/log.h"
#include "src/msan.h"
#include "src/objects.h"
@@ -42,6 +46,44 @@ void PromotionQueue::insert(HeapObject* target, int size) {
}
+#define ROOT_ACCESSOR(type, name, camel_name) \
+ type* Heap::name() { return type::cast(roots_[k##camel_name##RootIndex]); }
+ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+ Map* Heap::name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
+STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
+
+#define STRING_ACCESSOR(name, str) \
+ String* Heap::name() { return String::cast(roots_[k##name##RootIndex]); }
+INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name) \
+ Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
+PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name, varname, description) \
+ Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
+PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+#define ROOT_ACCESSOR(type, name, camel_name) \
+ void Heap::set_##name(type* value) { \
+ /* The deserializer makes use of the fact that these common roots are */ \
+ /* never in new space and never on a page that is being compacted. */ \
+ DCHECK(!deserialization_complete() || \
+ RootCanBeWrittenAfterInitialization(k##camel_name##RootIndex)); \
+ DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
+ roots_[k##camel_name##RootIndex] = value; \
+ }
+ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+
template <>
bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
// TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
@@ -629,6 +671,27 @@ void ExternalStringTable::ShrinkNewStrings(int position) {
}
+int DescriptorLookupCache::Lookup(Map* source, Name* name) {
+ if (!name->IsUniqueName()) return kAbsent;
+ int index = Hash(source, name);
+ Key& key = keys_[index];
+ if ((key.source == source) && (key.name == name)) return results_[index];
+ return kAbsent;
+}
+
+
+void DescriptorLookupCache::Update(Map* source, Name* name, int result) {
+ DCHECK(result != kAbsent);
+ if (name->IsUniqueName()) {
+ int index = Hash(source, name);
+ Key& key = keys_[index];
+ key.source = source;
+ key.name = name;
+ results_[index] = result;
+ }
+}
+
+
void Heap::ClearInstanceofCache() {
set_instanceof_cache_function(Smi::FromInt(0));
}
@@ -645,6 +708,46 @@ void Heap::CompletelyClearInstanceofCache() {
}
+uint32_t Heap::HashSeed() {
+ uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
+ DCHECK(FLAG_randomize_hashes || seed == 0);
+ return seed;
+}
+
+
+Smi* Heap::NextScriptId() {
+ int next_id = last_script_id()->value() + 1;
+ if (!Smi::IsValid(next_id) || next_id < 0) next_id = 1;
+ Smi* next_id_smi = Smi::FromInt(next_id);
+ set_last_script_id(next_id_smi);
+ return next_id_smi;
+}
+
+
+void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
+ DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
+ set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+
+void Heap::SetConstructStubDeoptPCOffset(int pc_offset) {
+ DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
+ set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+
+void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
+ DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
+ set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+
+void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
+ DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
+ set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
: heap_(isolate->heap()), daf_(isolate) {
heap_->always_allocate_scope_depth_++;
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 5bcc9097ee..e3cf13673f 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/heap/heap.h"
#include "src/accessors.h"
#include "src/api.h"
@@ -14,43 +14,29 @@
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/store-buffer.h"
#include "src/heap-profiler.h"
+#include "src/interpreter/interpreter.h"
#include "src/runtime-profiler.h"
#include "src/scopeinfo.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serialize.h"
#include "src/snapshot/snapshot.h"
#include "src/utils.h"
+#include "src/v8.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
-#if V8_TARGET_ARCH_PPC && !V8_INTERPRETED_REGEXP
-#include "src/regexp-macro-assembler.h" // NOLINT
-#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT
-#endif
-#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
-#include "src/regexp-macro-assembler.h" // NOLINT
-#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
-#endif
-#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
-#include "src/regexp-macro-assembler.h" // NOLINT
-#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
-#endif
-#if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
-#include "src/regexp-macro-assembler.h"
-#include "src/mips64/regexp-macro-assembler-mips64.h"
-#endif
-
namespace v8 {
namespace internal {
@@ -85,7 +71,6 @@ Heap::Heap()
maximum_committed_(0),
survived_since_last_expansion_(0),
survived_last_scavenge_(0),
- sweep_generation_(0),
always_allocate_scope_depth_(0),
contexts_disposed_(0),
global_ic_age_(0),
@@ -113,15 +98,10 @@ Heap::Heap()
inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
hidden_string_(NULL),
- gc_safe_size_of_old_object_(NULL),
total_regexp_code_generated_(0),
tracer_(this),
- new_space_high_promotion_mode_active_(false),
- gathering_lifetime_feedback_(0),
high_survival_rate_period_length_(0),
promoted_objects_size_(0),
- low_survival_rate_period_length_(0),
- survival_rate_(0),
promotion_ratio_(0),
semi_space_copied_object_size_(0),
previous_semi_space_copied_object_size_(0),
@@ -130,8 +110,6 @@ Heap::Heap()
nodes_copied_in_new_space_(0),
nodes_promoted_(0),
maximum_size_scavenges_(0),
- previous_survival_rate_trend_(Heap::STABLE),
- survival_rate_trend_(Heap::STABLE),
max_gc_pause_(0.0),
total_gc_time_ms_(0.0),
max_alive_after_gc_(0),
@@ -142,7 +120,6 @@ Heap::Heap()
last_gc_time_(0.0),
mark_compact_collector_(this),
store_buffer_(this),
- marking_(this),
incremental_marking_(this),
memory_reducer_(this),
full_codegen_bytes_generated_(0),
@@ -156,6 +133,7 @@ Heap::Heap()
ring_buffer_end_(0),
promotion_queue_(this),
configured_(false),
+ current_gc_flags_(Heap::kNoGCFlags),
external_string_table_(this),
chunks_queued_for_free_(NULL),
gc_callbacks_depth_(0),
@@ -170,7 +148,7 @@ Heap::Heap()
#endif
// Ensure old_generation_size_ is a multiple of kPageSize.
- DCHECK(MB >= Page::kPageSize);
+ DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
set_native_contexts_list(NULL);
@@ -254,14 +232,6 @@ bool Heap::HasBeenSetUp() {
}
-int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
- if (IntrusiveMarking::IsMarked(object)) {
- return IntrusiveMarking::SizeOfMarkedObject(object);
- }
- return object->SizeFromMap(object->map());
-}
-
-
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
const char** reason) {
// Is global GC requested?
@@ -431,7 +401,6 @@ void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
void Heap::GarbageCollectionPrologue() {
{
AllowHeapAllocation for_the_first_part_of_prologue;
- ClearJSFunctionResultCaches();
gc_count_++;
unflattened_strings_length_ = 0;
@@ -510,6 +479,7 @@ const char* Heap::GetSpaceName(int idx) {
void Heap::ClearAllICsByKind(Code::Kind kind) {
+ // TODO(mvstanton): Do not iterate the heap.
HeapObjectIterator it(code_space());
for (Object* object = it.Next(); object != NULL; object = it.Next()) {
@@ -775,7 +745,7 @@ void Heap::PreprocessStackTraces() {
void Heap::HandleGCRequest() {
if (incremental_marking()->request_type() ==
IncrementalMarking::COMPLETE_MARKING) {
- CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt",
+ CollectAllGarbage(current_gc_flags(), "GC interrupt",
incremental_marking()->CallbackFlags());
return;
}
@@ -802,6 +772,8 @@ void Heap::OverApproximateWeakClosure(const char* gc_reason) {
GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
+ // TODO(mlippautz): Report kGCTypeIncremental once blink updates its
+ // filtering.
CallGCPrologueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
}
}
@@ -813,6 +785,8 @@ void Heap::OverApproximateWeakClosure(const char* gc_reason) {
GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
+ // TODO(mlippautz): Report kGCTypeIncremental once blink updates its
+ // filtering.
CallGCEpilogueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
}
}
@@ -824,9 +798,9 @@ void Heap::CollectAllGarbage(int flags, const char* gc_reason,
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
- mark_compact_collector_.SetFlags(flags);
+ set_current_gc_flags(flags);
CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
- mark_compact_collector_.SetFlags(kNoGCFlags);
+ set_current_gc_flags(kNoGCFlags);
}
@@ -848,8 +822,7 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
isolate()->optimizing_compile_dispatcher()->Flush();
}
isolate()->ClearSerializerData();
- mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
- kReduceMemoryFootprintMask);
+ set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
const int kMinNumberOfAttempts = 2;
@@ -860,7 +833,7 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
break;
}
}
- mark_compact_collector()->SetFlags(kNoGCFlags);
+ set_current_gc_flags(kNoGCFlags);
new_space_.Shrink();
UncommitFromSpace();
}
@@ -908,12 +881,9 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
}
}
- if (collector == MARK_COMPACTOR &&
- !mark_compact_collector()->finalize_incremental_marking() &&
- !mark_compact_collector()->abort_incremental_marking() &&
- !incremental_marking()->IsStopped() &&
- !incremental_marking()->should_hurry() &&
- FLAG_incremental_marking_steps) {
+ if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
+ !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
+ !incremental_marking()->should_hurry() && FLAG_incremental_marking) {
// Make progress in incremental marking.
const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
@@ -983,8 +953,7 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
// Start incremental marking for the next cycle. The heap snapshot
// generator needs incremental marking to stay off after it aborted.
- if (!mark_compact_collector()->abort_incremental_marking() &&
- incremental_marking()->IsStopped() &&
+ if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
incremental_marking()->Start(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
}
@@ -1084,7 +1053,7 @@ bool Heap::ReserveSpace(Reservation* reservations) {
bool perform_gc = false;
if (space == LO_SPACE) {
DCHECK_EQ(1, reservation->length());
- perform_gc = !lo_space()->CanAllocateSize(reservation->at(0).size);
+ perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
} else {
for (auto& chunk : *reservation) {
AllocationResult allocation;
@@ -1145,29 +1114,6 @@ void Heap::EnsureFromSpaceIsCommitted() {
}
-void Heap::ClearJSFunctionResultCaches() {
- if (isolate_->bootstrapper()->IsActive()) return;
-
- Object* context = native_contexts_list();
- while (!context->IsUndefined()) {
- // Get the caches for this context. GC can happen when the context
- // is not fully initialized, so the caches can be undefined.
- Object* caches_or_undefined =
- Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
- if (!caches_or_undefined->IsUndefined()) {
- FixedArray* caches = FixedArray::cast(caches_or_undefined);
- // Clear the caches:
- int length = caches->length();
- for (int i = 0; i < length; i++) {
- JSFunctionResultCache::cast(caches->get(i))->Clear();
- }
- }
- // Get the next context:
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
- }
-}
-
-
void Heap::ClearNormalizedMapCaches() {
if (isolate_->bootstrapper()->IsActive() &&
!incremental_marking()->IsMarking()) {
@@ -1213,24 +1159,6 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
} else {
high_survival_rate_period_length_ = 0;
}
-
- if (survival_rate < kYoungSurvivalRateLowThreshold) {
- low_survival_rate_period_length_++;
- } else {
- low_survival_rate_period_length_ = 0;
- }
-
- double survival_rate_diff = survival_rate_ - survival_rate;
-
- if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
- set_survival_rate_trend(DECREASING);
- } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
- set_survival_rate_trend(INCREASING);
- } else {
- set_survival_rate_trend(STABLE);
- }
-
- survival_rate_ = survival_rate;
}
bool Heap::PerformGarbageCollection(
@@ -1276,7 +1204,6 @@ bool Heap::PerformGarbageCollection(
UpdateOldGenerationAllocationCounter();
// Perform mark-sweep with optional compaction.
MarkCompact();
- sweep_generation_++;
old_gen_exhausted_ = false;
old_generation_size_configured_ = true;
// This should be updated before PostGarbageCollectionProcessing, which can
@@ -1288,16 +1215,8 @@ bool Heap::PerformGarbageCollection(
Scavenge();
}
- bool deopted = ProcessPretenuringFeedback();
+ ProcessPretenuringFeedback();
UpdateSurvivalStatistics(start_new_space_size);
-
- // When pretenuring is collecting new feedback, we do not shrink the new space
- // right away.
- if (deopted) {
- RecordDeoptForPretenuring();
- } else {
- ConfigureNewGenerationSize();
- }
ConfigureInitialOldGenerationSize();
isolate_->counters()->objs_since_last_young()->Set(0);
@@ -1319,7 +1238,8 @@ bool Heap::PerformGarbageCollection(
AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
freed_global_handles =
- isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
+ isolate_->global_handles()->PostGarbageCollectionProcessing(
+ collector, gc_callback_flags);
}
gc_post_processing_depth_--;
@@ -1367,10 +1287,9 @@ bool Heap::PerformGarbageCollection(
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_type & gc_prologue_callbacks_[i].gc_type) {
- if (!gc_prologue_callbacks_[i].pass_isolate_) {
- v8::GCPrologueCallback callback =
- reinterpret_cast<v8::GCPrologueCallback>(
- gc_prologue_callbacks_[i].callback);
+ if (!gc_prologue_callbacks_[i].pass_isolate) {
+ v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
+ gc_prologue_callbacks_[i].callback);
callback(gc_type, flags);
} else {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
@@ -1385,10 +1304,9 @@ void Heap::CallGCEpilogueCallbacks(GCType gc_type,
GCCallbackFlags gc_callback_flags) {
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
- if (!gc_epilogue_callbacks_[i].pass_isolate_) {
- v8::GCPrologueCallback callback =
- reinterpret_cast<v8::GCPrologueCallback>(
- gc_epilogue_callbacks_[i].callback);
+ if (!gc_epilogue_callbacks_[i].pass_isolate) {
+ v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
+ gc_epilogue_callbacks_[i].callback);
callback(gc_type, gc_callback_flags);
} else {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
@@ -1521,8 +1439,7 @@ void Heap::CheckNewSpaceExpansionCriteria() {
survived_since_last_expansion_ = 0;
}
} else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
- survived_since_last_expansion_ > new_space_.TotalCapacity() &&
- !new_space_high_promotion_mode_active_) {
+ survived_since_last_expansion_ > new_space_.TotalCapacity()) {
// Grow the size of new space if there is room to grow, and enough data
// has survived scavenge since the last expansion.
new_space_.Grow();
@@ -1653,6 +1570,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::Scavenge() {
+ GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
RelocationLock relocation_lock(this);
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. There is no sense in trying to
@@ -1704,35 +1622,54 @@ void Heap::Scavenge() {
promotion_queue_.Initialize();
ScavengeVisitor scavenge_visitor(this);
- // Copy roots.
- IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
+ {
+ // Copy roots.
+ GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
+ IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
+ }
- // Copy objects reachable from the old generation.
{
+ // Copy objects reachable from the old generation.
+ GCTracer::Scope gc_scope(tracer(),
+ GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
StoreBufferRebuildScope scope(this, store_buffer(),
&ScavengeStoreBufferCallback);
store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
}
- // Copy objects reachable from the encountered weak collections list.
- scavenge_visitor.VisitPointer(&encountered_weak_collections_);
- // Copy objects reachable from the encountered weak cells.
- scavenge_visitor.VisitPointer(&encountered_weak_cells_);
-
- // Copy objects reachable from the code flushing candidates list.
- MarkCompactCollector* collector = mark_compact_collector();
- if (collector->is_code_flushing_enabled()) {
- collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
+ {
+ GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
+ // Copy objects reachable from the encountered weak collections list.
+ scavenge_visitor.VisitPointer(&encountered_weak_collections_);
+ // Copy objects reachable from the encountered weak cells.
+ scavenge_visitor.VisitPointer(&encountered_weak_cells_);
}
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ {
+ // Copy objects reachable from the code flushing candidates list.
+ GCTracer::Scope gc_scope(tracer(),
+ GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
+ MarkCompactCollector* collector = mark_compact_collector();
+ if (collector->is_code_flushing_enabled()) {
+ collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
+ }
+ }
- while (isolate()->global_handles()->IterateObjectGroups(
- &scavenge_visitor, &IsUnscavengedHeapObject)) {
+ {
+ GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
}
- isolate()->global_handles()->RemoveObjectGroups();
- isolate()->global_handles()->RemoveImplicitRefGroups();
+
+ {
+ GCTracer::Scope gc_scope(tracer(),
+ GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
+ while (isolate()->global_handles()->IterateObjectGroups(
+ &scavenge_visitor, &IsUnscavengedHeapObject)) {
+ new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ }
+ isolate()->global_handles()->RemoveObjectGroups();
+ isolate()->global_handles()->RemoveImplicitRefGroups();
+ }
isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
&IsUnscavengedHeapObject);
@@ -1854,13 +1791,61 @@ void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
}
+void Heap::RegisterNewArrayBufferHelper(std::map<void*, size_t>& live_buffers,
+ void* data, size_t length) {
+ live_buffers[data] = length;
+}
+
+
+void Heap::UnregisterArrayBufferHelper(
+ std::map<void*, size_t>& live_buffers,
+ std::map<void*, size_t>& not_yet_discovered_buffers, void* data) {
+ DCHECK(live_buffers.count(data) > 0);
+ live_buffers.erase(data);
+ not_yet_discovered_buffers.erase(data);
+}
+
+
+void Heap::RegisterLiveArrayBufferHelper(
+ std::map<void*, size_t>& not_yet_discovered_buffers, void* data) {
+ not_yet_discovered_buffers.erase(data);
+}
+
+
+size_t Heap::FreeDeadArrayBuffersHelper(
+ Isolate* isolate, std::map<void*, size_t>& live_buffers,
+ std::map<void*, size_t>& not_yet_discovered_buffers) {
+ size_t freed_memory = 0;
+ for (auto buffer = not_yet_discovered_buffers.begin();
+ buffer != not_yet_discovered_buffers.end(); ++buffer) {
+ isolate->array_buffer_allocator()->Free(buffer->first, buffer->second);
+ freed_memory += buffer->second;
+ live_buffers.erase(buffer->first);
+ }
+ not_yet_discovered_buffers = live_buffers;
+ return freed_memory;
+}
+
+
+void Heap::TearDownArrayBuffersHelper(
+ Isolate* isolate, std::map<void*, size_t>& live_buffers,
+ std::map<void*, size_t>& not_yet_discovered_buffers) {
+ for (auto buffer = live_buffers.begin(); buffer != live_buffers.end();
+ ++buffer) {
+ isolate->array_buffer_allocator()->Free(buffer->first, buffer->second);
+ }
+ live_buffers.clear();
+ not_yet_discovered_buffers.clear();
+}
+
+
void Heap::RegisterNewArrayBuffer(bool in_new_space, void* data,
size_t length) {
if (!data) return;
+ RegisterNewArrayBufferHelper(live_array_buffers_, data, length);
if (in_new_space) {
- live_array_buffers_for_scavenge_[data] = length;
- } else {
- live_array_buffers_[data] = length;
+ RegisterNewArrayBufferHelper(live_array_buffers_for_scavenge_, data,
+ length);
}
// We may go over the limit of externally allocated memory here. We call the
@@ -1872,75 +1857,54 @@ void Heap::RegisterNewArrayBuffer(bool in_new_space, void* data,
void Heap::UnregisterArrayBuffer(bool in_new_space, void* data) {
if (!data) return;
-
- std::map<void*, size_t>* live_buffers =
- in_new_space ? &live_array_buffers_for_scavenge_ : &live_array_buffers_;
- std::map<void*, size_t>* not_yet_discovered_buffers =
- in_new_space ? &not_yet_discovered_array_buffers_for_scavenge_
- : &not_yet_discovered_array_buffers_;
-
- DCHECK(live_buffers->count(data) > 0);
- live_buffers->erase(data);
- not_yet_discovered_buffers->erase(data);
+ UnregisterArrayBufferHelper(live_array_buffers_,
+ not_yet_discovered_array_buffers_, data);
+ if (in_new_space) {
+ UnregisterArrayBufferHelper(live_array_buffers_for_scavenge_,
+ not_yet_discovered_array_buffers_for_scavenge_,
+ data);
+ }
}
void Heap::RegisterLiveArrayBuffer(bool from_scavenge, void* data) {
// ArrayBuffer might be in the middle of being constructed.
if (data == undefined_value()) return;
- if (from_scavenge) {
- not_yet_discovered_array_buffers_for_scavenge_.erase(data);
- } else if (!not_yet_discovered_array_buffers_.erase(data)) {
- not_yet_discovered_array_buffers_for_scavenge_.erase(data);
- }
+ RegisterLiveArrayBufferHelper(
+ from_scavenge ? not_yet_discovered_array_buffers_for_scavenge_
+ : not_yet_discovered_array_buffers_,
+ data);
}
void Heap::FreeDeadArrayBuffers(bool from_scavenge) {
- size_t freed_memory = 0;
- for (auto& buffer : not_yet_discovered_array_buffers_for_scavenge_) {
- isolate()->array_buffer_allocator()->Free(buffer.first, buffer.second);
- freed_memory += buffer.second;
- live_array_buffers_for_scavenge_.erase(buffer.first);
- }
-
- if (!from_scavenge) {
- for (auto& buffer : not_yet_discovered_array_buffers_) {
- isolate()->array_buffer_allocator()->Free(buffer.first, buffer.second);
- freed_memory += buffer.second;
+ if (from_scavenge) {
+ for (auto& buffer : not_yet_discovered_array_buffers_for_scavenge_) {
+ not_yet_discovered_array_buffers_.erase(buffer.first);
live_array_buffers_.erase(buffer.first);
}
+ } else {
+ for (auto& buffer : not_yet_discovered_array_buffers_) {
+ // Scavenge can't happen during evacuation, so we only need to update
+ // live_array_buffers_for_scavenge_.
+ // not_yet_discovered_array_buffers_for_scavenge_ will be reset before
+ // the next scavenge run in PrepareArrayBufferDiscoveryInNewSpace.
+ live_array_buffers_for_scavenge_.erase(buffer.first);
+ }
}
- not_yet_discovered_array_buffers_for_scavenge_ =
- live_array_buffers_for_scavenge_;
- if (!from_scavenge) not_yet_discovered_array_buffers_ = live_array_buffers_;
-
// Do not call through the api as this code is triggered while doing a GC.
- amount_of_external_allocated_memory_ -= freed_memory;
+ amount_of_external_allocated_memory_ -= FreeDeadArrayBuffersHelper(
+ isolate_,
+ from_scavenge ? live_array_buffers_for_scavenge_ : live_array_buffers_,
+ from_scavenge ? not_yet_discovered_array_buffers_for_scavenge_
+ : not_yet_discovered_array_buffers_);
}
void Heap::TearDownArrayBuffers() {
- size_t freed_memory = 0;
- for (auto& buffer : live_array_buffers_) {
- isolate()->array_buffer_allocator()->Free(buffer.first, buffer.second);
- freed_memory += buffer.second;
- }
- for (auto& buffer : live_array_buffers_for_scavenge_) {
- isolate()->array_buffer_allocator()->Free(buffer.first, buffer.second);
- freed_memory += buffer.second;
- }
- live_array_buffers_.clear();
- live_array_buffers_for_scavenge_.clear();
- not_yet_discovered_array_buffers_.clear();
- not_yet_discovered_array_buffers_for_scavenge_.clear();
-
- if (freed_memory > 0) {
- reinterpret_cast<v8::Isolate*>(isolate_)
- ->AdjustAmountOfExternalAllocatedMemory(
- -static_cast<int64_t>(freed_memory));
- }
+ TearDownArrayBuffersHelper(isolate_, live_array_buffers_,
+ not_yet_discovered_array_buffers_);
}
@@ -1958,7 +1922,7 @@ void Heap::PromoteArrayBuffer(Object* obj) {
// ArrayBuffer might be in the middle of being constructed.
if (data == undefined_value()) return;
DCHECK(live_array_buffers_for_scavenge_.count(data) > 0);
- live_array_buffers_[data] = live_array_buffers_for_scavenge_[data];
+ DCHECK(live_array_buffers_.count(data) > 0);
live_array_buffers_for_scavenge_.erase(data);
not_yet_discovered_array_buffers_for_scavenge_.erase(data);
}
@@ -2101,15 +2065,17 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
int end_of_region_offset;
if (helper.IsTagged(offset, size, &end_of_region_offset)) {
IterateAndMarkPointersToFromSpace(
- record_slots, obj_address + offset,
- obj_address + end_of_region_offset, &ScavengeObject);
+ target, obj_address + offset,
+ obj_address + end_of_region_offset, record_slots,
+ &ScavengeObject);
}
offset = end_of_region_offset;
}
} else {
#endif
- IterateAndMarkPointersToFromSpace(
- record_slots, obj_address, obj_address + size, &ScavengeObject);
+ IterateAndMarkPointersToFromSpace(target, obj_address,
+ obj_address + size, record_slots,
+ &ScavengeObject);
#if V8_DOUBLE_FIELDS_UNBOXING
}
#endif
@@ -2325,7 +2291,7 @@ class ScavengingVisitor : public StaticVisitorBase {
if (marks_handling == TRANSFER_MARKS) {
if (Marking::TransferColor(source, target)) {
- MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
+ MemoryChunk::IncrementLiveBytesFromGC(target, size);
}
}
}
@@ -2435,7 +2401,7 @@ class ScavengingVisitor : public StaticVisitorBase {
target->address() + JSFunction::kCodeEntryOffset;
Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
- code_entry_slot, code);
+ target, code_entry_slot, code);
}
}
@@ -2465,7 +2431,8 @@ class ScavengingVisitor : public StaticVisitorBase {
DCHECK(map_word.IsForwardingAddress());
FixedTypedArrayBase* target =
reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
- target->set_base_pointer(target, SKIP_WRITE_BARRIER);
+ if (target->base_pointer() != Smi::FromInt(0))
+ target->set_base_pointer(target, SKIP_WRITE_BARRIER);
}
@@ -2478,7 +2445,8 @@ class ScavengingVisitor : public StaticVisitorBase {
DCHECK(map_word.IsForwardingAddress());
FixedTypedArrayBase* target =
reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
- target->set_base_pointer(target, SKIP_WRITE_BARRIER);
+ if (target->base_pointer() != Smi::FromInt(0))
+ target->set_base_pointer(target, SKIP_WRITE_BARRIER);
}
@@ -2544,7 +2512,7 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
- heap->DoScavengeObject(first->map(), slot, first);
+ Heap::ScavengeObjectSlow(slot, first);
object->set_map_word(MapWord::FromForwardingAddress(*slot));
return;
}
@@ -2634,7 +2602,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
MapWord first_word = object->map_word();
SLOW_DCHECK(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
- map->GetHeap()->DoScavengeObject(map, p, object);
+ map->GetHeap()->scavenging_visitors_table_.GetVisitor(map)(map, p, object);
}
@@ -2649,48 +2617,6 @@ void Heap::ConfigureInitialOldGenerationSize() {
}
-void Heap::ConfigureNewGenerationSize() {
- bool still_gathering_lifetime_data = gathering_lifetime_feedback_ != 0;
- if (gathering_lifetime_feedback_ != 0) gathering_lifetime_feedback_--;
- if (!new_space_high_promotion_mode_active_ &&
- new_space_.TotalCapacity() == new_space_.MaximumCapacity() &&
- IsStableOrIncreasingSurvivalTrend() && IsHighSurvivalRate()) {
- // Stable high survival rates even though young generation is at
- // maximum capacity indicates that most objects will be promoted.
- // To decrease scavenger pauses and final mark-sweep pauses, we
- // have to limit maximal capacity of the young generation.
- if (still_gathering_lifetime_data) {
- if (FLAG_trace_gc) {
- PrintPID(
- "Postpone entering high promotion mode as optimized pretenuring "
- "code is still being generated\n");
- }
- } else {
- new_space_high_promotion_mode_active_ = true;
- if (FLAG_trace_gc) {
- PrintPID("Limited new space size due to high promotion rate: %d MB\n",
- new_space_.InitialTotalCapacity() / MB);
- }
- }
- } else if (new_space_high_promotion_mode_active_ &&
- IsStableOrDecreasingSurvivalTrend() && IsLowSurvivalRate()) {
- // Decreasing low survival rates might indicate that the above high
- // promotion mode is over and we should allow the young generation
- // to grow again.
- new_space_high_promotion_mode_active_ = false;
- if (FLAG_trace_gc) {
- PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
- new_space_.MaximumCapacity() / MB);
- }
- }
-
- if (new_space_high_promotion_mode_active_ &&
- new_space_.TotalCapacity() > new_space_.InitialTotalCapacity()) {
- new_space_.Shrink();
- }
-}
-
-
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result = nullptr;
@@ -2708,8 +2634,9 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
reinterpret_cast<Map*>(result)
->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
}
- reinterpret_cast<Map*>(result)->set_inobject_properties(0);
- reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
+ reinterpret_cast<Map*>(result)->clear_unused();
+ reinterpret_cast<Map*>(result)
+ ->set_inobject_properties_or_constructor_function_index(0);
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
reinterpret_cast<Map*>(result)->set_bit_field(0);
reinterpret_cast<Map*>(result)->set_bit_field2(0);
@@ -2735,8 +2662,8 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
map->set_instance_size(instance_size);
- map->set_inobject_properties(0);
- map->set_pre_allocated_property_fields(0);
+ map->clear_unused();
+ map->set_inobject_properties_or_constructor_function_index(0);
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
@@ -2925,19 +2852,34 @@ bool Heap::CreateInitialMaps() {
#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
+#define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
+ constructor_function_index) \
+ { \
+ ALLOCATE_MAP((instance_type), (size), field_name); \
+ field_name##_map()->SetConstructorFunctionIndex( \
+ (constructor_function_index)); \
+ }
+
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
DCHECK(fixed_array_map() != fixed_cow_array_map());
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
- ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
+ ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
+ Context::NUMBER_FUNCTION_INDEX)
ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
mutable_heap_number)
- ALLOCATE_MAP(FLOAT32X4_TYPE, Float32x4::kSize, float32x4)
- ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
+ ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
+ Context::SYMBOL_FUNCTION_INDEX)
+#define ALLOCATE_SIMD128_MAP(TYPE, Type, type, lane_count, lane_type) \
+ ALLOCATE_PRIMITIVE_MAP(SIMD128_VALUE_TYPE, Type::kSize, type, \
+ Context::TYPE##_FUNCTION_INDEX)
+ SIMD128_TYPES(ALLOCATE_SIMD128_MAP)
+#undef ALLOCATE_SIMD128_MAP
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
+ ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
+ Context::BOOLEAN_FUNCTION_INDEX);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
@@ -2950,9 +2892,10 @@ bool Heap::CreateInitialMaps() {
AllocationResult allocation = AllocateMap(entry.type, entry.size);
if (!allocation.To(&obj)) return false;
}
+ Map* map = Map::cast(obj);
+ map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
// Mark cons string maps as unstable, because their objects can change
// maps during GC.
- Map* map = Map::cast(obj);
if (StringShape(entry.type).IsCons()) map->mark_unstable();
roots_[entry.index] = map;
}
@@ -2961,20 +2904,16 @@ bool Heap::CreateInitialMaps() {
AllocationResult allocation = AllocateMap(EXTERNAL_ONE_BYTE_STRING_TYPE,
ExternalOneByteString::kSize);
if (!allocation.To(&obj)) return false;
- set_native_source_string_map(Map::cast(obj));
+ Map* map = Map::cast(obj);
+ map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
+ set_native_source_string_map(map);
}
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
+ ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
-#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
- ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kSize, \
- external_##type##_array)
-
- TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
-#undef ALLOCATE_EXTERNAL_ARRAY_MAP
-
#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
@@ -3021,6 +2960,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
external_map()->set_is_extensible(false);
+#undef ALLOCATE_PRIMITIVE_MAP
#undef ALLOCATE_VARSIZE_MAP
#undef ALLOCATE_MAP
}
@@ -3030,18 +2970,15 @@ bool Heap::CreateInitialMaps() {
ByteArray* byte_array;
if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
set_empty_byte_array(byte_array);
- }
-
-#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
- { \
- ExternalArray* obj; \
- if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
- return false; \
- set_empty_external_##type##_array(obj); \
- }
- TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
-#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
+ BytecodeArray* bytecode_array;
+ AllocationResult allocation =
+ AllocateBytecodeArray(0, nullptr, kPointerSize);
+ if (!allocation.To(&bytecode_array)) {
+ return false;
+ }
+ set_empty_bytecode_array(bytecode_array);
+ }
#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
{ \
@@ -3081,31 +3018,30 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
return result;
}
-
-AllocationResult Heap::AllocateFloat32x4(float w, float x, float y, float z,
- PretenureFlag pretenure) {
- // Statically ensure that it is safe to allocate SIMD values in paged
- // spaces.
- int size = Float32x4::kSize;
- STATIC_ASSERT(Float32x4::kSize <= Page::kMaxRegularHeapObjectSize);
-
- AllocationSpace space = SelectSpace(size, pretenure);
-
- HeapObject* result;
- {
- AllocationResult allocation =
- AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_no_write_barrier(float32x4_map());
- Float32x4* float32x4 = Float32x4::cast(result);
- float32x4->set_lane(0, w);
- float32x4->set_lane(1, x);
- float32x4->set_lane(2, y);
- float32x4->set_lane(3, z);
- return result;
-}
+#define SIMD_ALLOCATE_DEFINITION(TYPE, Type, type, lane_count, lane_type) \
+ AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count], \
+ PretenureFlag pretenure) { \
+ int size = Type::kSize; \
+ STATIC_ASSERT(Type::kSize <= Page::kMaxRegularHeapObjectSize); \
+ \
+ AllocationSpace space = SelectSpace(size, pretenure); \
+ \
+ HeapObject* result; \
+ { \
+ AllocationResult allocation = \
+ AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned); \
+ if (!allocation.To(&result)) return allocation; \
+ } \
+ \
+ result->set_map_no_write_barrier(type##_map()); \
+ Type* instance = Type::cast(result); \
+ for (int i = 0; i < lane_count; i++) { \
+ instance->set_lane(i, lanes[i]); \
+ } \
+ return result; \
+ }
+SIMD128_TYPES(SIMD_ALLOCATE_DEFINITION)
+#undef SIMD_ALLOCATE_DEFINITION
AllocationResult Heap::AllocateCell(Object* value) {
@@ -3242,44 +3178,47 @@ void Heap::CreateInitialObjects() {
// Finish initializing oddballs after creating the string table.
Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
- factory->nan_value(), Oddball::kUndefined);
+ factory->nan_value(), "undefined", Oddball::kUndefined);
// Initialize the null_value.
Oddball::Initialize(isolate(), factory->null_value(), "null",
- handle(Smi::FromInt(0), isolate()), Oddball::kNull);
+ handle(Smi::FromInt(0), isolate()), "object",
+ Oddball::kNull);
set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
handle(Smi::FromInt(1), isolate()),
- Oddball::kTrue));
+ "boolean", Oddball::kTrue));
set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
handle(Smi::FromInt(0), isolate()),
- Oddball::kFalse));
+ "boolean", Oddball::kFalse));
set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
handle(Smi::FromInt(-1), isolate()),
- Oddball::kTheHole));
+ "undefined", Oddball::kTheHole));
- set_uninitialized_value(*factory->NewOddball(
- factory->uninitialized_map(), "uninitialized",
- handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
+ set_uninitialized_value(
+ *factory->NewOddball(factory->uninitialized_map(), "uninitialized",
+ handle(Smi::FromInt(-1), isolate()), "undefined",
+ Oddball::kUninitialized));
- set_arguments_marker(*factory->NewOddball(
- factory->arguments_marker_map(), "arguments_marker",
- handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
+ set_arguments_marker(
+ *factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
+ handle(Smi::FromInt(-4), isolate()), "undefined",
+ Oddball::kArgumentMarker));
set_no_interceptor_result_sentinel(*factory->NewOddball(
factory->no_interceptor_result_sentinel_map(),
"no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
- Oddball::kOther));
+ "undefined", Oddball::kOther));
set_termination_exception(*factory->NewOddball(
factory->termination_exception_map(), "termination_exception",
- handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
+ handle(Smi::FromInt(-3), isolate()), "undefined", Oddball::kOther));
set_exception(*factory->NewOddball(factory->exception_map(), "exception",
handle(Smi::FromInt(-5), isolate()),
- Oddball::kException));
+ "undefined", Oddball::kException));
for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
Handle<String> str =
@@ -3365,6 +3304,9 @@ void Heap::CreateInitialObjects() {
set_extra_natives_source_cache(
*factory->NewFixedArray(ExtraNatives::GetBuiltinsCount()));
+ set_code_stub_natives_source_cache(
+ *factory->NewFixedArray(CodeStubNatives::GetBuiltinsCount()));
+
set_undefined_cell(*factory->NewCell(factory->undefined_value()));
// The symbol registry is initialized lazily.
@@ -3379,25 +3321,17 @@ void Heap::CreateInitialObjects() {
set_microtask_queue(empty_fixed_array());
{
- FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
- Handle<TypeFeedbackVector> dummy_vector =
- factory->NewTypeFeedbackVector(&spec);
- dummy_vector->Set(FeedbackVectorICSlot(0),
- *TypeFeedbackVector::MegamorphicSentinel(isolate()),
- SKIP_WRITE_BARRIER);
- set_keyed_load_dummy_vector(*dummy_vector);
- }
-
- if (FLAG_vector_stores) {
- FeedbackVectorSpec spec(0, Code::KEYED_STORE_IC);
+ Code::Kind kinds[] = {Code::LOAD_IC, Code::KEYED_LOAD_IC, Code::STORE_IC,
+ Code::KEYED_STORE_IC};
+ FeedbackVectorSpec spec(0, 4, kinds);
Handle<TypeFeedbackVector> dummy_vector =
factory->NewTypeFeedbackVector(&spec);
- dummy_vector->Set(FeedbackVectorICSlot(0),
- *TypeFeedbackVector::MegamorphicSentinel(isolate()),
- SKIP_WRITE_BARRIER);
- set_keyed_store_dummy_vector(*dummy_vector);
- } else {
- set_keyed_store_dummy_vector(empty_fixed_array());
+ for (int i = 0; i < 4; i++) {
+ dummy_vector->Set(FeedbackVectorICSlot(0),
+ *TypeFeedbackVector::MegamorphicSentinel(isolate()),
+ SKIP_WRITE_BARRIER);
+ }
+ set_dummy_vector(*dummy_vector);
}
set_detached_contexts(empty_fixed_array());
@@ -3427,6 +3361,11 @@ void Heap::CreateInitialObjects() {
set_weak_stack_trace_list(Smi::FromInt(0));
+ // Will be filled in by Interpreter::Initialize().
+ set_interpreter_table(
+ *interpreter::Interpreter::CreateUninitializedInterpreterTable(
+ isolate()));
+
set_allocation_sites_scratchpad(
*factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
InitializeAllocationSitesScratchpad();
@@ -3644,35 +3583,14 @@ void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
// candidates are not part of the global list of old space pages and
// releasing an evacuation candidate due to a slots buffer overflow
// results in lost pages.
- mark_compact_collector()->RecordSlot(slot, slot, *slot,
- SlotsBuffer::IGNORE_OVERFLOW);
+ mark_compact_collector()->RecordSlot(allocation_sites_scratchpad(), slot,
+ *slot, SlotsBuffer::IGNORE_OVERFLOW);
}
allocation_sites_scratchpad_length_++;
}
}
-Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
- return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
-}
-
-
-Heap::RootListIndex Heap::RootIndexForExternalArrayType(
- ExternalArrayType array_type) {
- switch (array_type) {
-#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- return kExternal##Type##ArrayMapRootIndex;
-
- TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
-#undef ARRAY_TYPE_TO_ROOT_INDEX
-
- default:
- UNREACHABLE();
- return kUndefinedValueRootIndex;
- }
-}
-
Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
@@ -3696,23 +3614,6 @@ Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
}
-Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
- ElementsKind elementsKind) {
- switch (elementsKind) {
-#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
- return kEmptyExternal##Type##ArrayRootIndex;
-
- TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
-#undef ELEMENT_KIND_TO_ROOT_INDEX
-
- default:
- UNREACHABLE();
- return kUndefinedValueRootIndex;
- }
-}
-
-
Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
ElementsKind elementsKind) {
switch (elementsKind) {
@@ -3729,12 +3630,6 @@ Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
}
-ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
- return ExternalArray::cast(
- roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
-}
-
-
FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
return FixedTypedArrayBase::cast(
roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
@@ -3772,6 +3667,30 @@ AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
}
+AllocationResult Heap::AllocateBytecodeArray(int length,
+ const byte* const raw_bytecodes,
+ int frame_size) {
+ if (length < 0 || length > BytecodeArray::kMaxLength) {
+ v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
+ }
+
+ int size = BytecodeArray::SizeFor(length);
+ HeapObject* result;
+ {
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
+ if (!allocation.To(&result)) return allocation;
+ }
+
+ result->set_map_no_write_barrier(bytecode_array_map());
+ BytecodeArray* instance = BytecodeArray::cast(result);
+ instance->set_length(length);
+ instance->set_frame_size(frame_size);
+ CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
+
+ return result;
+}
+
+
void Heap::CreateFillerObjectAt(Address addr, int size) {
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
@@ -3806,13 +3725,13 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
}
-void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
+void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
if (incremental_marking()->IsMarking() &&
- Marking::IsBlack(Marking::MarkBitFrom(address))) {
+ Marking::IsBlack(Marking::MarkBitFrom(object->address()))) {
if (mode == SEQUENTIAL_TO_SWEEPER) {
- MemoryChunk::IncrementLiveBytesFromGC(address, by);
+ MemoryChunk::IncrementLiveBytesFromGC(object, by);
} else {
- MemoryChunk::IncrementLiveBytesFromMutator(address, by);
+ MemoryChunk::IncrementLiveBytesFromMutator(object, by);
}
}
}
@@ -3858,8 +3777,8 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
FixedArrayBase::cast(HeapObject::FromAddress(new_start));
// Maintain consistency of live bytes during incremental marking
- marking()->TransferMark(object->address(), new_start);
- AdjustLiveBytes(new_start, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
+ Marking::TransferMark(this, object->address(), new_start);
+ AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
// Notify the heap profiler of change in object layout.
OnMoveEvent(new_object, object, new_object->Size());
@@ -3920,7 +3839,7 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
object->synchronized_set_length(len - elements_to_trim);
// Maintain consistency of live bytes during incremental marking
- AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
+ AdjustLiveBytes(object, -bytes_to_trim, mode);
// Notify the heap profiler of change in object layout. The array may not be
// moved during GC, and size has to be adjusted nevertheless.
@@ -3931,11 +3850,10 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
}
-AllocationResult Heap::AllocateExternalArray(int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure) {
- int size = ExternalArray::kSize;
+AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
+ int length, ExternalArrayType array_type, void* external_pointer,
+ PretenureFlag pretenure) {
+ int size = FixedTypedArrayBase::kHeaderSize;
AllocationSpace space = SelectSpace(size, pretenure);
HeapObject* result;
{
@@ -3943,10 +3861,12 @@ AllocationResult Heap::AllocateExternalArray(int length,
if (!allocation.To(&result)) return allocation;
}
- result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
- ExternalArray::cast(result)->set_length(length);
- ExternalArray::cast(result)->set_external_pointer(external_pointer);
- return result;
+ result->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
+ FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
+ elements->set_base_pointer(Smi::FromInt(0), SKIP_WRITE_BARRIER);
+ elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
+ elements->set_length(length);
+ return elements;
}
static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
@@ -3986,9 +3906,12 @@ AllocationResult Heap::AllocateFixedTypedArray(int length,
array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
if (!allocation.To(&object)) return allocation;
- object->set_map(MapForFixedTypedArray(array_type));
+ object->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
+ elements->set_external_pointer(
+ ExternalReference::fixed_typed_array_base_data_offset().address(),
+ SKIP_WRITE_BARRIER);
elements->set_length(length);
if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
return elements;
@@ -4176,8 +4099,7 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
AllocationResult Heap::AllocateJSObjectFromMap(
- Map* map, PretenureFlag pretenure, bool allocate_properties,
- AllocationSite* allocation_site) {
+ Map* map, PretenureFlag pretenure, AllocationSite* allocation_site) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
@@ -4188,17 +4110,7 @@ AllocationResult Heap::AllocateJSObjectFromMap(
DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
- FixedArray* properties;
- if (allocate_properties) {
- int prop_size = map->InitialPropertiesLength();
- DCHECK(prop_size >= 0);
- {
- AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
- if (!allocation.To(&properties)) return allocation;
- }
- } else {
- properties = empty_fixed_array();
- }
+ FixedArray* properties = empty_fixed_array();
// Allocate the JSObject.
int size = map->instance_size();
@@ -4209,8 +4121,7 @@ AllocationResult Heap::AllocateJSObjectFromMap(
// Initialize the JSObject.
InitializeJSObjectFromMap(js_obj, properties, map);
- DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
- js_obj->HasFixedTypedArrayElements());
+ DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements());
return js_obj;
}
@@ -4222,7 +4133,7 @@ AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
// Allocate the object based on the constructors initial map.
AllocationResult allocation = AllocateJSObjectFromMap(
- constructor->initial_map(), pretenure, true, allocation_site);
+ constructor->initial_map(), pretenure, allocation_site);
#ifdef DEBUG
// Make sure result is NOT a global object if valid.
HeapObject* obj;
@@ -4502,12 +4413,6 @@ AllocationResult Heap::AllocateEmptyFixedArray() {
}
-AllocationResult Heap::AllocateEmptyExternalArray(
- ExternalArrayType array_type) {
- return AllocateExternalArray(0, array_type, NULL, TENURED);
-}
-
-
AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
if (!InNewSpace(src)) {
return src;
@@ -4523,7 +4428,7 @@ AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
FixedArray* result = FixedArray::cast(obj);
result->set_length(len);
- // Copy the content
+ // Copy the content.
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
@@ -4542,6 +4447,29 @@ AllocationResult Heap::AllocateEmptyFixedTypedArray(
}
+AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
+ PretenureFlag pretenure) {
+ int old_len = src->length();
+ int new_len = old_len + grow_by;
+ DCHECK(new_len >= old_len);
+ HeapObject* obj;
+ {
+ AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
+ if (!allocation.To(&obj)) return allocation;
+ }
+ obj->set_map_no_write_barrier(fixed_array_map());
+ FixedArray* result = FixedArray::cast(obj);
+ result->set_length(new_len);
+
+ // Copy the content.
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
+ MemsetPointer(result->data_start() + old_len, undefined_value(), grow_by);
+ return result;
+}
+
+
AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
int len = src->length();
HeapObject* obj;
@@ -4559,7 +4487,7 @@ AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
FixedArray* result = FixedArray::cast(obj);
result->set_length(len);
- // Copy the content
+ // Copy the content.
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
@@ -4830,10 +4758,14 @@ void Heap::ReduceNewSpaceSize() {
// TODO(ulan): Unify this constant with the similar constant in
// GCIdleTimeHandler once the change is merged to 4.5.
static const size_t kLowAllocationThroughput = 1000;
- size_t allocation_throughput =
+ const size_t allocation_throughput =
tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
- if (FLAG_predictable || allocation_throughput == 0) return;
- if (allocation_throughput < kLowAllocationThroughput) {
+
+ if (FLAG_predictable) return;
+
+ if (ShouldReduceMemory() ||
+ ((allocation_throughput != 0) &&
+ (allocation_throughput < kLowAllocationThroughput))) {
new_space_.Shrink();
UncommitFromSpace();
}
@@ -4857,7 +4789,8 @@ bool Heap::TryFinalizeIdleIncrementalMarking(
gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
static_cast<size_t>(idle_time_in_ms), size_of_objects,
final_incremental_mark_compact_speed_in_bytes_per_ms))) {
- CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
+ CollectAllGarbage(current_gc_flags(),
+ "idle notification: finalize incremental");
return true;
}
return false;
@@ -4970,6 +4903,16 @@ void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
static_cast<int>(idle_time_in_ms));
+ if (deadline_in_ms - start_ms >
+ GCIdleTimeHandler::kMaxFrameRenderingIdleTime) {
+ int committed_memory = static_cast<int>(CommittedMemory() / KB);
+ int used_memory = static_cast<int>(heap_state.size_of_objects / KB);
+ isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
+ start_ms, committed_memory);
+ isolate()->counters()->aggregated_memory_heap_used()->AddSample(
+ start_ms, used_memory);
+ }
+
if (deadline_difference >= 0) {
if (action.type != DONE && action.type != DO_NOTHING) {
isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
@@ -5211,6 +5154,11 @@ void Heap::Verify() {
code_space_->Verify(&no_dirty_regions_visitor);
lo_space_->Verify();
+
+ mark_compact_collector_.VerifyWeakEmbeddedObjectsInCode();
+ if (FLAG_omit_map_checks_for_leaf_maps) {
+ mark_compact_collector_.VerifyOmittedMapChecks();
+ }
}
#endif
@@ -5229,33 +5177,33 @@ void Heap::ZapFromSpace() {
}
-void Heap::IterateAndMarkPointersToFromSpace(bool record_slots, Address start,
- Address end,
+void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
+ Address end, bool record_slots,
ObjectSlotCallback callback) {
Address slot_address = start;
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* object = *slot;
+ Object* target = *slot;
// If the store buffer becomes overfull we mark pages as being exempt from
// the store buffer. These pages are scanned to find pointers that point
// to the new space. In that case we may hit newly promoted objects and
// fix the pointers before the promotion queue gets to them. Thus the 'if'.
- if (object->IsHeapObject()) {
- if (Heap::InFromSpace(object)) {
+ if (target->IsHeapObject()) {
+ if (Heap::InFromSpace(target)) {
callback(reinterpret_cast<HeapObject**>(slot),
- HeapObject::cast(object));
- Object* new_object = *slot;
- if (InNewSpace(new_object)) {
- SLOW_DCHECK(Heap::InToSpace(new_object));
- SLOW_DCHECK(new_object->IsHeapObject());
+ HeapObject::cast(target));
+ Object* new_target = *slot;
+ if (InNewSpace(new_target)) {
+ SLOW_DCHECK(Heap::InToSpace(new_target));
+ SLOW_DCHECK(new_target->IsHeapObject());
store_buffer_.EnterDirectlyIntoStoreBuffer(
reinterpret_cast<Address>(slot));
}
- SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+ SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
} else if (record_slots &&
- MarkCompactCollector::IsOnEvacuationCandidate(object)) {
- mark_compact_collector()->RecordSlot(slot, slot, object);
+ MarkCompactCollector::IsOnEvacuationCandidate(target)) {
+ mark_compact_collector()->RecordSlot(object, slot, target);
}
}
slot_address += kPointerSize;
@@ -5401,6 +5349,13 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB;
}
+ if (Page::kPageSize > MB) {
+ max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize);
+ max_old_generation_size_ =
+ ROUND_UP(max_old_generation_size_, Page::kPageSize);
+ max_executable_size_ = ROUND_UP(max_executable_size_, Page::kPageSize);
+ }
+
if (FLAG_stress_compaction) {
// This will cause more frequent GCs when stressing.
max_semi_space_size_ = Page::kPageSize;
@@ -5426,12 +5381,6 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
reserved_semispace_size_ = max_semi_space_size_;
}
- // The max executable size must be less than or equal to the max old
- // generation size.
- if (max_executable_size_ > max_old_generation_size_) {
- max_executable_size_ = max_old_generation_size_;
- }
-
// The new space size must be a power of two to support single-bit testing
// for containment.
max_semi_space_size_ =
@@ -5450,7 +5399,8 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
max_semi_space_size_ / MB);
}
} else {
- initial_semispace_size_ = initial_semispace_size;
+ initial_semispace_size_ =
+ ROUND_UP(initial_semispace_size, Page::kPageSize);
}
}
@@ -5475,7 +5425,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
max_semi_space_size_ / MB);
}
} else {
- target_semispace_size_ = target_semispace_size;
+ target_semispace_size_ = ROUND_UP(target_semispace_size, Page::kPageSize);
}
}
@@ -5491,6 +5441,12 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
max_old_generation_size_);
+ // The max executable size must be less than or equal to the max old
+ // generation size.
+ if (max_executable_size_ > max_old_generation_size_) {
+ max_executable_size_ = max_old_generation_size_;
+ }
+
if (FLAG_initial_old_space_size > 0) {
initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
} else {
@@ -5695,8 +5651,7 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
factor = Min(factor, kConservativeHeapGrowingFactor);
}
- if (FLAG_stress_compaction ||
- mark_compact_collector()->reduce_memory_footprint_) {
+ if (FLAG_stress_compaction || ShouldReduceMemory()) {
factor = kMinHeapGrowingFactor;
}
@@ -5786,8 +5741,6 @@ bool Heap::SetUp() {
base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
- MarkMapPointersAsEncoded(false);
-
// Set up memory allocator.
if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
return false;
@@ -5799,8 +5752,7 @@ bool Heap::SetUp() {
new_space_top_after_last_gc_ = new_space()->top();
// Initialize old space.
- old_space_ =
- new OldSpace(this, max_old_generation_size_, OLD_SPACE, NOT_EXECUTABLE);
+ old_space_ = new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
if (old_space_ == NULL) return false;
if (!old_space_->SetUp()) return false;
@@ -5808,20 +5760,19 @@ bool Heap::SetUp() {
// Initialize the code space, set its maximum capacity to the old
// generation size. It needs executable memory.
- code_space_ =
- new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
+ code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
if (!code_space_->SetUp()) return false;
// Initialize map space.
- map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
+ map_space_ = new MapSpace(this, MAP_SPACE);
if (map_space_ == NULL) return false;
if (!map_space_->SetUp()) return false;
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
- lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
+ lo_space_ = new LargeObjectSpace(this, LO_SPACE);
if (lo_space_ == NULL) return false;
if (!lo_space_->SetUp()) return false;
@@ -5988,16 +5939,16 @@ void Heap::TearDown() {
}
-void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+void Heap::AddGCPrologueCallback(v8::Isolate::GCCallback callback,
GCType gc_type, bool pass_isolate) {
DCHECK(callback != NULL);
- GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
+ GCCallbackPair pair(callback, gc_type, pass_isolate);
DCHECK(!gc_prologue_callbacks_.Contains(pair));
return gc_prologue_callbacks_.Add(pair);
}
-void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
+void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallback callback) {
DCHECK(callback != NULL);
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_prologue_callbacks_[i].callback == callback) {
@@ -6009,16 +5960,16 @@ void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
}
-void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
GCType gc_type, bool pass_isolate) {
DCHECK(callback != NULL);
- GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
+ GCCallbackPair pair(callback, gc_type, pass_isolate);
DCHECK(!gc_epilogue_callbacks_.Contains(pair));
return gc_epilogue_callbacks_.Add(pair);
}
-void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
+void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback) {
DCHECK(callback != NULL);
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_epilogue_callbacks_[i].callback == callback) {
@@ -6153,17 +6104,7 @@ OldSpace* OldSpaces::next() {
SpaceIterator::SpaceIterator(Heap* heap)
- : heap_(heap),
- current_space_(FIRST_SPACE),
- iterator_(NULL),
- size_func_(NULL) {}
-
-
-SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
- : heap_(heap),
- current_space_(FIRST_SPACE),
- iterator_(NULL),
- size_func_(size_func) {}
+ : heap_(heap), current_space_(FIRST_SPACE), iterator_(NULL) {}
SpaceIterator::~SpaceIterator() {
@@ -6200,19 +6141,19 @@ ObjectIterator* SpaceIterator::CreateIterator() {
switch (current_space_) {
case NEW_SPACE:
- iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
+ iterator_ = new SemiSpaceIterator(heap_->new_space());
break;
case OLD_SPACE:
- iterator_ = new HeapObjectIterator(heap_->old_space(), size_func_);
+ iterator_ = new HeapObjectIterator(heap_->old_space());
break;
case CODE_SPACE:
- iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
+ iterator_ = new HeapObjectIterator(heap_->code_space());
break;
case MAP_SPACE:
- iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
+ iterator_ = new HeapObjectIterator(heap_->map_space());
break;
case LO_SPACE:
- iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
+ iterator_ = new LargeObjectIterator(heap_->lo_space());
break;
}
@@ -6240,6 +6181,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
bool SkipObject(HeapObject* object) {
+ if (object->IsFiller()) return true;
MarkBit mark_bit = Marking::MarkBitFrom(object);
return Marking::IsWhite(mark_bit);
}
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 0f0cfc15fc..39c7055f8e 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -10,7 +10,6 @@
#include "src/allocation.h"
#include "src/assert-scope.h"
-#include "src/counters.h"
#include "src/globals.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
@@ -21,7 +20,6 @@
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
#include "src/list.h"
-#include "src/splay-tree-inl.h"
namespace v8 {
namespace internal {
@@ -48,6 +46,12 @@ namespace internal {
V(Map, heap_number_map, HeapNumberMap) \
V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
V(Map, float32x4_map, Float32x4Map) \
+ V(Map, int32x4_map, Int32x4Map) \
+ V(Map, bool32x4_map, Bool32x4Map) \
+ V(Map, int16x8_map, Int16x8Map) \
+ V(Map, bool16x8_map, Bool16x8Map) \
+ V(Map, int8x16_map, Int8x16Map) \
+ V(Map, bool8x16_map, Bool8x16Map) \
V(Map, native_context_map, NativeContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, code_map, CodeMap) \
@@ -105,25 +109,6 @@ namespace internal {
V(Map, short_external_one_byte_internalized_string_map, \
ShortExternalOneByteInternalizedStringMap) \
V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap) \
- V(Map, external_int8_array_map, ExternalInt8ArrayMap) \
- V(Map, external_uint8_array_map, ExternalUint8ArrayMap) \
- V(Map, external_int16_array_map, ExternalInt16ArrayMap) \
- V(Map, external_uint16_array_map, ExternalUint16ArrayMap) \
- V(Map, external_int32_array_map, ExternalInt32ArrayMap) \
- V(Map, external_uint32_array_map, ExternalUint32ArrayMap) \
- V(Map, external_float32_array_map, ExternalFloat32ArrayMap) \
- V(Map, external_float64_array_map, ExternalFloat64ArrayMap) \
- V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap) \
- V(ExternalArray, empty_external_int8_array, EmptyExternalInt8Array) \
- V(ExternalArray, empty_external_uint8_array, EmptyExternalUint8Array) \
- V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array) \
- V(ExternalArray, empty_external_uint16_array, EmptyExternalUint16Array) \
- V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array) \
- V(ExternalArray, empty_external_uint32_array, EmptyExternalUint32Array) \
- V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array) \
- V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array) \
- V(ExternalArray, empty_external_uint8_clamped_array, \
- EmptyExternalUint8ClampedArray) \
V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \
@@ -177,6 +162,7 @@ namespace internal {
V(FixedArray, experimental_natives_source_cache, \
ExperimentalNativesSourceCache) \
V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache) \
+ V(FixedArray, code_stub_natives_source_cache, CodeStubNativesSourceCache) \
V(Script, empty_script, EmptyScript) \
V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
V(Cell, undefined_cell, UndefinedCell) \
@@ -187,14 +173,19 @@ namespace internal {
V(FixedArray, materialized_objects, MaterializedObjects) \
V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
V(FixedArray, microtask_queue, MicrotaskQueue) \
- V(FixedArray, keyed_load_dummy_vector, KeyedLoadDummyVector) \
- V(FixedArray, keyed_store_dummy_vector, KeyedStoreDummyVector) \
+ V(FixedArray, dummy_vector, DummyVector) \
V(FixedArray, detached_contexts, DetachedContexts) \
V(ArrayList, retained_maps, RetainedMaps) \
V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable) \
V(PropertyCell, array_protector, ArrayProtector) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
- V(Object, weak_stack_trace_list, WeakStackTraceList)
+ V(Object, weak_stack_trace_list, WeakStackTraceList) \
+ V(Object, code_stub_context, CodeStubContext) \
+ V(JSObject, code_stub_exports_object, CodeStubExportsObject) \
+ V(FixedArray, interpreter_table, InterpreterTable) \
+ V(Map, bytecode_array_map, BytecodeArrayMap) \
+ V(BytecodeArray, empty_bytecode_array, EmptyBytecodeArray)
+
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
@@ -224,6 +215,20 @@ namespace internal {
V(constructor_string, "constructor") \
V(dot_result_string, ".result") \
V(eval_string, "eval") \
+ V(float32x4_string, "float32x4") \
+ V(Float32x4_string, "Float32x4") \
+ V(int32x4_string, "int32x4") \
+ V(Int32x4_string, "Int32x4") \
+ V(bool32x4_string, "bool32x4") \
+ V(Bool32x4_string, "Bool32x4") \
+ V(int16x8_string, "int16x8") \
+ V(Int16x8_string, "Int16x8") \
+ V(bool16x8_string, "bool16x8") \
+ V(Bool16x8_string, "Bool16x8") \
+ V(int8x16_string, "int8x16") \
+ V(Int8x16_string, "Int8x16") \
+ V(bool8x16_string, "bool8x16") \
+ V(Bool8x16_string, "Bool8x16") \
V(function_string, "function") \
V(Function_string, "Function") \
V(length_string, "length") \
@@ -270,7 +275,6 @@ namespace internal {
V(toJSON_string, "toJSON") \
V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
- V(stack_overflow_string, "$stackOverflowBoilerplate") \
V(illegal_access_string, "illegal access") \
V(cell_value_string, "%cell_value") \
V(illegal_argument_string, "illegal argument") \
@@ -312,12 +316,12 @@ namespace internal {
V(intl_impl_object_symbol) \
V(promise_debug_marker_symbol) \
V(promise_has_handler_symbol) \
- V(class_script_symbol) \
V(class_start_position_symbol) \
V(class_end_position_symbol) \
V(error_start_pos_symbol) \
V(error_end_pos_symbol) \
- V(error_script_symbol)
+ V(error_script_symbol) \
+ V(internal_error_symbol)
#define PUBLIC_SYMBOL_LIST(V) \
V(has_instance_symbol, symbolHasInstance, Symbol.hasInstance) \
@@ -332,6 +336,7 @@ namespace internal {
// skip write barriers. This list is not complete and has omissions.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
V(ByteArrayMap) \
+ V(BytecodeArrayMap) \
V(FreeSpaceMap) \
V(OnePointerFillerMap) \
V(TwoPointerFillerMap) \
@@ -348,6 +353,12 @@ namespace internal {
V(HeapNumberMap) \
V(MutableHeapNumberMap) \
V(Float32x4Map) \
+ V(Int32x4Map) \
+ V(Bool32x4Map) \
+ V(Int16x8Map) \
+ V(Bool16x8Map) \
+ V(Int8x16Map) \
+ V(Bool8x16Map) \
V(NativeContextMap) \
V(FixedArrayMap) \
V(CodeMap) \
@@ -360,6 +371,7 @@ namespace internal {
V(OrderedHashTableMap) \
V(EmptyFixedArray) \
V(EmptyByteArray) \
+ V(EmptyBytecodeArray) \
V(EmptyDescriptorArray) \
V(ArgumentsMarker) \
V(SymbolMap) \
@@ -780,7 +792,7 @@ class Heap {
enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };
// Maintain consistency of live bytes during incremental marking.
- void AdjustLiveBytes(Address address, int by, InvocationMode mode);
+ void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode);
// Trim the given array from the left. Note that this relocates the object
// start and hence is only valid if there is only a single reference to it.
@@ -868,44 +880,40 @@ class Heap {
PromotionQueue* promotion_queue() { return &promotion_queue_; }
- void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+ void AddGCPrologueCallback(v8::Isolate::GCCallback callback,
GCType gc_type_filter, bool pass_isolate = true);
- void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);
+ void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback);
- void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+ void AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
GCType gc_type_filter, bool pass_isolate = true);
- void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);
+ void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback);
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
// TODO(1490): Try removing the unchecked accessors, now that GC marking does
// not corrupt the map.
-#define ROOT_ACCESSOR(type, name, camel_name) \
- type* name() { return type::cast(roots_[k##camel_name##RootIndex]); } \
- type* raw_unchecked_##name() { \
- return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
+#define ROOT_ACCESSOR(type, name, camel_name) \
+ inline type* name(); \
+ type* raw_unchecked_##name() { \
+ return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
// Utility type maps
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
- Map* name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
-#define STRING_ACCESSOR(name, str) \
- String* name() { return String::cast(roots_[k##name##RootIndex]); }
+#define STRING_ACCESSOR(name, str) inline String* name();
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR
-#define SYMBOL_ACCESSOR(name) \
- Symbol* name() { return Symbol::cast(roots_[k##name##RootIndex]); }
+#define SYMBOL_ACCESSOR(name) inline Symbol* name();
PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
-#define SYMBOL_ACCESSOR(name, varname, description) \
- Symbol* name() { return Symbol::cast(roots_[k##name##RootIndex]); }
+#define SYMBOL_ACCESSOR(name, varname, description) inline Symbol* name();
PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
@@ -939,7 +947,7 @@ class Heap {
Object* encountered_weak_cells() const { return encountered_weak_cells_; }
// Number of mark-sweeps.
- unsigned int ms_count() { return ms_count_; }
+ int ms_count() const { return ms_count_; }
// Iterates over all roots in the heap.
void IterateRoots(ObjectVisitor* v, VisitMode mode);
@@ -952,9 +960,9 @@ class Heap {
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
// Iterate pointers to from semispace of new space found in memory interval
- // from start to end.
- void IterateAndMarkPointersToFromSpace(bool record_slots, Address start,
- Address end,
+ // from start to end within |object|.
+ void IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
+ Address end, bool record_slots,
ObjectSlotCallback callback);
// Returns whether the object resides in new space.
@@ -990,13 +998,6 @@ class Heap {
roots_[kCodeStubsRootIndex] = value;
}
- // Support for computing object sizes for old objects during GCs. Returns
- // a function that is guaranteed to be safe for computing object sizes in
- // the current GC phase.
- HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
- return gc_safe_size_of_old_object_;
- }
-
// Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
roots_[kNonMonomorphicCacheRootIndex] = value;
@@ -1094,6 +1095,9 @@ class Heap {
static inline void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+ // Slow part of scavenge object.
+ static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
+
enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };
// If an object has an AllocationMemento trailing it, return it, otherwise
@@ -1269,12 +1273,7 @@ class Heap {
Map* MapForFixedTypedArray(ExternalArrayType array_type);
RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
- Map* MapForExternalArrayType(ExternalArrayType array_type);
- RootListIndex RootIndexForExternalArrayType(ExternalArrayType array_type);
-
- RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
- ExternalArray* EmptyExternalArrayForMap(Map* map);
FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
void RecordStats(HeapStats* stats, bool take_snapshot = false);
@@ -1341,8 +1340,6 @@ class Heap {
// scavenge operation.
inline bool ShouldBePromoted(Address old_address, int object_size);
- void ClearJSFunctionResultCaches();
-
void ClearNormalizedMapCaches();
GCTracer* tracer() { return &tracer_; }
@@ -1393,10 +1390,6 @@ class Heap {
return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_;
}
- // Record the fact that we generated some optimized code since the last GC
- // which will pretenure some previously unpretenured allocation.
- void RecordDeoptForPretenuring() { gathering_lifetime_feedback_ = 2; }
-
// Update GC statistics that are tracked on the Heap.
void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
double marking_time);
@@ -1418,17 +1411,12 @@ class Heap {
StoreBuffer* store_buffer() { return &store_buffer_; }
- Marking* marking() { return &marking_; }
-
IncrementalMarking* incremental_marking() { return &incremental_marking_; }
ExternalStringTable* external_string_table() {
return &external_string_table_;
}
- // Returns the current sweep generation.
- int sweep_generation() { return sweep_generation_; }
-
bool concurrent_sweeping_enabled() { return concurrent_sweeping_enabled_; }
inline Isolate* isolate();
@@ -1438,10 +1426,6 @@ class Heap {
inline bool OldGenerationAllocationLimitReached();
- inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
- scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
- }
-
void QueueMemoryChunkForFree(MemoryChunk* chunk);
void FreeQueuedChunks();
@@ -1456,39 +1440,14 @@ class Heap {
// The roots that have an index less than this are always in old space.
static const int kOldSpaceRoots = 0x20;
- uint32_t HashSeed() {
- uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
- DCHECK(FLAG_randomize_hashes || seed == 0);
- return seed;
- }
-
- Smi* NextScriptId() {
- int next_id = last_script_id()->value() + 1;
- if (!Smi::IsValid(next_id) || next_id < 0) next_id = 1;
- Smi* next_id_smi = Smi::FromInt(next_id);
- set_last_script_id(next_id_smi);
- return next_id_smi;
- }
-
- void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
- DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
- set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
- }
-
- void SetConstructStubDeoptPCOffset(int pc_offset) {
- DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
- set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
- }
+ inline uint32_t HashSeed();
- void SetGetterStubDeoptPCOffset(int pc_offset) {
- DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
- set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
- }
+ inline Smi* NextScriptId();
- void SetSetterStubDeoptPCOffset(int pc_offset) {
- DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
- set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
- }
+ inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
+ inline void SetConstructStubDeoptPCOffset(int pc_offset);
+ inline void SetGetterStubDeoptPCOffset(int pc_offset);
+ inline void SetSetterStubDeoptPCOffset(int pc_offset);
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
@@ -1664,7 +1623,6 @@ class Heap {
// points to the site.
MUST_USE_RESULT AllocationResult
AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
- bool alloc_props = true,
AllocationSite* allocation_site = NULL);
// Allocates a HeapNumber from value.
@@ -1672,15 +1630,22 @@ class Heap {
AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
PretenureFlag pretenure = NOT_TENURED);
- // Allocates a Float32x4 from the given lane values.
- MUST_USE_RESULT AllocationResult
- AllocateFloat32x4(float w, float x, float y, float z,
- PretenureFlag pretenure = NOT_TENURED);
+// Allocates SIMD values from the given lane values.
+#define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \
+ AllocationResult Allocate##Type(lane_type lanes[lane_count], \
+ PretenureFlag pretenure = NOT_TENURED);
+ SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION)
+#undef SIMD_ALLOCATE_DECLARATION
// Allocates a byte array of the specified length
MUST_USE_RESULT AllocationResult
AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
+ // Allocates a bytecode array with given contents.
+ MUST_USE_RESULT AllocationResult
+ AllocateBytecodeArray(int length, const byte* raw_bytecodes,
+ int frame_size);
+
// Copy the code and scope info part of the code object, but insert
// the provided data as the relocation information.
MUST_USE_RESULT AllocationResult
@@ -1699,6 +1664,25 @@ class Heap {
private:
Heap();
+ int current_gc_flags() { return current_gc_flags_; }
+ void set_current_gc_flags(int flags) {
+ current_gc_flags_ = flags;
+ DCHECK(!ShouldFinalizeIncrementalMarking() ||
+ !ShouldAbortIncrementalMarking());
+ }
+
+ inline bool ShouldReduceMemory() const {
+ return current_gc_flags_ & kReduceMemoryFootprintMask;
+ }
+
+ inline bool ShouldAbortIncrementalMarking() const {
+ return current_gc_flags_ & kAbortIncrementalMarkingMask;
+ }
+
+ inline bool ShouldFinalizeIncrementalMarking() const {
+ return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
+ }
+
// The amount of external memory registered through the API kept alive
// by global handles
int64_t amount_of_external_allocated_memory_;
@@ -1730,9 +1714,6 @@ class Heap {
// ... and since the last scavenge.
int survived_last_scavenge_;
- // For keeping track on when to flush RegExp code.
- int sweep_generation_;
-
int always_allocate_scope_depth_;
// For keeping track of context disposals.
@@ -1777,15 +1758,8 @@ class Heap {
// Total length of the strings we failed to flatten since the last GC.
int unflattened_strings_length_;
-#define ROOT_ACCESSOR(type, name, camel_name) \
- inline void set_##name(type* value) { \
- /* The deserializer makes use of the fact that these common roots are */ \
- /* never in new space and never on a page that is being compacted. */ \
- DCHECK(!deserialization_complete() || \
- RootCanBeWrittenAfterInitialization(k##camel_name##RootIndex)); \
- DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
- roots_[k##camel_name##RootIndex] = value; \
- }
+#define ROOT_ACCESSOR(type, name, camel_name) \
+ inline void set_##name(type* value);
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
@@ -1855,45 +1829,22 @@ class Heap {
void AddPrivateGlobalSymbols(Handle<Object> private_intern_table);
- // GC callback function, called before and after mark-compact GC.
- // Allocations in the callback function are disallowed.
- struct GCPrologueCallbackPair {
- GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
- GCType gc_type, bool pass_isolate)
- : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
- bool operator==(const GCPrologueCallbackPair& pair) const {
- return pair.callback == callback;
- }
- v8::Isolate::GCPrologueCallback callback;
- GCType gc_type;
- // TODO(dcarney): remove variable
- bool pass_isolate_;
- };
- List<GCPrologueCallbackPair> gc_prologue_callbacks_;
-
- struct GCEpilogueCallbackPair {
- GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback,
- GCType gc_type, bool pass_isolate)
- : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
- bool operator==(const GCEpilogueCallbackPair& pair) const {
- return pair.callback == callback;
+ struct GCCallbackPair {
+ GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type,
+ bool pass_isolate)
+ : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {}
+
+ bool operator==(const GCCallbackPair& other) const {
+ return other.callback == callback;
}
- v8::Isolate::GCPrologueCallback callback;
+
+ v8::Isolate::GCCallback callback;
GCType gc_type;
- // TODO(dcarney): remove variable
- bool pass_isolate_;
+ bool pass_isolate;
};
- List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
-
- // Support for computing object sizes during GC.
- HeapObjectCallback gc_safe_size_of_old_object_;
- static int GcSafeSizeOfOldObject(HeapObject* object);
- // Update the GC state. Called from the mark-compact collector.
- void MarkMapPointersAsEncoded(bool encoded) {
- DCHECK(!encoded);
- gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
- }
+ List<GCCallbackPair> gc_epilogue_callbacks_;
+ List<GCCallbackPair> gc_prologue_callbacks_;
// Code that should be run before and after each GC. Includes some
// reporting/verification activities when compiled with DEBUG set.
@@ -2023,17 +1974,18 @@ class Heap {
// Allocates an uninitialized fixed array. It must be filled by the caller.
MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);
- // Make a copy of src and return it. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ // Make a copy of src and return it.
MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
- // Make a copy of src, set the map, and return the copy. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ // Make a copy of src, also grow the copy, and return the copy.
+ MUST_USE_RESULT AllocationResult
+ CopyFixedArrayAndGrow(FixedArray* src, int grow_by, PretenureFlag pretenure);
+
+ // Make a copy of src, set the map, and return the copy.
MUST_USE_RESULT AllocationResult
CopyFixedArrayWithMap(FixedArray* src, Map* map);
- // Make a copy of src and return it. Returns
- // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ // Make a copy of src and return it.
MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
FixedDoubleArray* src);
@@ -2046,9 +1998,9 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateSymbol();
// Allocates an external array of the specified length and type.
- MUST_USE_RESULT AllocationResult
- AllocateExternalArray(int length, ExternalArrayType array_type,
- void* external_pointer, PretenureFlag pretenure);
+ MUST_USE_RESULT AllocationResult AllocateFixedTypedArrayWithExternalPointer(
+ int length, ExternalArrayType array_type, void* external_pointer,
+ PretenureFlag pretenure);
// Allocates a fixed typed array of the specified length and type.
MUST_USE_RESULT AllocationResult
@@ -2076,10 +2028,6 @@ class Heap {
// Allocate empty fixed array.
MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
- // Allocate empty external array of given type.
- MUST_USE_RESULT AllocationResult
- AllocateEmptyExternalArray(ExternalArrayType array_type);
-
// Allocate empty fixed typed array of given type.
MUST_USE_RESULT AllocationResult
AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
@@ -2147,13 +2095,25 @@ class Heap {
// Called on heap tear-down. Frees all remaining ArrayBuffer backing stores.
void TearDownArrayBuffers();
+ // These correspond to the non-Helper versions.
+ void RegisterNewArrayBufferHelper(std::map<void*, size_t>& live_buffers,
+ void* data, size_t length);
+ void UnregisterArrayBufferHelper(
+ std::map<void*, size_t>& live_buffers,
+ std::map<void*, size_t>& not_yet_discovered_buffers, void* data);
+ void RegisterLiveArrayBufferHelper(
+ std::map<void*, size_t>& not_yet_discovered_buffers, void* data);
+ size_t FreeDeadArrayBuffersHelper(
+ Isolate* isolate, std::map<void*, size_t>& live_buffers,
+ std::map<void*, size_t>& not_yet_discovered_buffers);
+ void TearDownArrayBuffersHelper(
+ Isolate* isolate, std::map<void*, size_t>& live_buffers,
+ std::map<void*, size_t>& not_yet_discovered_buffers);
+
// Record statistics before and after garbage collection.
void ReportStatisticsBeforeGC();
void ReportStatisticsAfterGC();
- // Slow part of scavenge object.
- static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
-
// Total RegExp code ever generated
double total_regexp_code_generated_;
@@ -2178,23 +2138,13 @@ class Heap {
void UpdateSurvivalStatistics(int start_new_space_size);
- enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
-
static const int kYoungSurvivalRateHighThreshold = 90;
- static const int kYoungSurvivalRateLowThreshold = 10;
static const int kYoungSurvivalRateAllowedDeviation = 15;
static const int kOldSurvivalRateLowThreshold = 10;
- bool new_space_high_promotion_mode_active_;
- // If this is non-zero, then there is hope yet that the optimized code we
- // have generated will solve our high promotion rate problems, so we don't
- // need to go into high promotion mode just yet.
- int gathering_lifetime_feedback_;
int high_survival_rate_period_length_;
intptr_t promoted_objects_size_;
- int low_survival_rate_period_length_;
- double survival_rate_;
double promotion_ratio_;
double promotion_rate_;
intptr_t semi_space_copied_object_size_;
@@ -2210,59 +2160,12 @@ class Heap {
// of the allocation site.
unsigned int maximum_size_scavenges_;
- SurvivalRateTrend previous_survival_rate_trend_;
- SurvivalRateTrend survival_rate_trend_;
-
- void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
- DCHECK(survival_rate_trend != FLUCTUATING);
- previous_survival_rate_trend_ = survival_rate_trend_;
- survival_rate_trend_ = survival_rate_trend;
- }
-
- SurvivalRateTrend survival_rate_trend() {
- if (survival_rate_trend_ == STABLE) {
- return STABLE;
- } else if (previous_survival_rate_trend_ == STABLE) {
- return survival_rate_trend_;
- } else if (survival_rate_trend_ != previous_survival_rate_trend_) {
- return FLUCTUATING;
- } else {
- return survival_rate_trend_;
- }
- }
-
- bool IsStableOrIncreasingSurvivalTrend() {
- switch (survival_rate_trend()) {
- case STABLE:
- case INCREASING:
- return true;
- default:
- return false;
- }
- }
-
- bool IsStableOrDecreasingSurvivalTrend() {
- switch (survival_rate_trend()) {
- case STABLE:
- case DECREASING:
- return true;
- default:
- return false;
- }
- }
-
- bool IsIncreasingSurvivalTrend() {
- return survival_rate_trend() == INCREASING;
- }
-
- bool IsLowSurvivalRate() { return low_survival_rate_period_length_ > 0; }
-
+ // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
+ // Re-visit incremental marking heuristics.
bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
void ConfigureInitialOldGenerationSize();
- void ConfigureNewGenerationSize();
-
void SelectScavengingVisitorsTable();
bool HasLowYoungGenerationAllocationRate();
@@ -2331,8 +2234,6 @@ class Heap {
StoreBuffer store_buffer_;
- Marking marking_;
-
IncrementalMarking incremental_marking_;
GCIdleTimeHandler gc_idle_time_handler_;
@@ -2381,6 +2282,9 @@ class Heap {
// configured through the API until it is set up.
bool configured_;
+ // Currently set GC flags that are respected by all GC components.
+ int current_gc_flags_;
+
ExternalStringTable external_string_table_;
VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
@@ -2418,17 +2322,22 @@ class Heap {
StrongRootsList* strong_roots_list_;
friend class AlwaysAllocateScope;
+ friend class Bootstrapper;
friend class Deserializer;
friend class Factory;
friend class GCCallbacksScope;
friend class GCTracer;
friend class HeapIterator;
+ friend class IncrementalMarking;
friend class Isolate;
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
friend class MapCompact;
friend class Page;
+ // Used in cctest.
+ friend class HeapTester;
+
DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -2552,7 +2461,6 @@ class PagedSpaces BASE_EMBEDDED {
class SpaceIterator : public Malloced {
public:
explicit SpaceIterator(Heap* heap);
- SpaceIterator(Heap* heap, HeapObjectCallback size_func);
virtual ~SpaceIterator();
bool has_next();
@@ -2564,7 +2472,6 @@ class SpaceIterator : public Malloced {
Heap* heap_;
int current_space_; // from enum AllocationSpace.
ObjectIterator* iterator_; // object iterator for the current space.
- HeapObjectCallback size_func_;
};
@@ -2684,25 +2591,10 @@ class DescriptorLookupCache {
public:
// Lookup descriptor index for (map, name).
// If absent, kAbsent is returned.
- int Lookup(Map* source, Name* name) {
- if (!name->IsUniqueName()) return kAbsent;
- int index = Hash(source, name);
- Key& key = keys_[index];
- if ((key.source == source) && (key.name == name)) return results_[index];
- return kAbsent;
- }
+ inline int Lookup(Map* source, Name* name);
// Update an element in the cache.
- void Update(Map* source, Name* name, int result) {
- DCHECK(result != kAbsent);
- if (name->IsUniqueName()) {
- int index = Hash(source, name);
- Key& key = keys_[index];
- key.source = source;
- key.name = name;
- results_[index] = result;
- }
- }
+ inline void Update(Map* source, Name* name, int result);
// Clear the cache.
void Clear();
@@ -2779,46 +2671,6 @@ class WeakObjectRetainer {
};
-// Intrusive object marking uses least significant bit of
-// heap object's map word to mark objects.
-// Normally all map words have least significant bit set
-// because they contain tagged map pointer.
-// If the bit is not set object is marked.
-// All objects should be unmarked before resuming
-// JavaScript execution.
-class IntrusiveMarking {
- public:
- static bool IsMarked(HeapObject* object) {
- return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
- }
-
- static void ClearMark(HeapObject* object) {
- uintptr_t map_word = object->map_word().ToRawValue();
- object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
- DCHECK(!IsMarked(object));
- }
-
- static void SetMark(HeapObject* object) {
- uintptr_t map_word = object->map_word().ToRawValue();
- object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
- DCHECK(IsMarked(object));
- }
-
- static Map* MapOfMarkedObject(HeapObject* object) {
- uintptr_t map_word = object->map_word().ToRawValue();
- return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
- }
-
- static int SizeOfMarkedObject(HeapObject* object) {
- return object->SizeFromMap(MapOfMarkedObject(object));
- }
-
- private:
- static const uintptr_t kNotMarkedBit = 0x1;
- STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0); // NOLINT
-};
-
-
#ifdef DEBUG
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
diff --git a/deps/v8/src/heap/identity-map.cc b/deps/v8/src/heap/identity-map.cc
index a93f607ee7..f901ac4424 100644
--- a/deps/v8/src/heap/identity-map.cc
+++ b/deps/v8/src/heap/identity-map.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/heap/identity-map.h"
#include "src/heap/heap.h"
-#include "src/heap/identity-map.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/identity-map.h b/deps/v8/src/heap/identity-map.h
index 5c754bccda..672ca5a52a 100644
--- a/deps/v8/src/heap/identity-map.h
+++ b/deps/v8/src/heap/identity-map.h
@@ -10,7 +10,9 @@
namespace v8 {
namespace internal {
+// Forward declarations.
class Heap;
+class Zone;
// Base class of identity maps contains shared code for all template
// instantions.
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index 42b3dcb127..fabf59d016 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -85,7 +85,7 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
DCHECK(IsMarking());
Marking::BlackToGrey(mark_bit);
int obj_size = obj->Size();
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), -obj_size);
+ MemoryChunk::IncrementLiveBytesFromGC(obj, -obj_size);
bytes_scanned_ -= obj_size;
int64_t old_bytes_rescanned = bytes_rescanned_;
bytes_rescanned_ = old_bytes_rescanned + obj_size;
@@ -105,13 +105,13 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
}
}
- heap_->mark_compact_collector()->marking_deque()->UnshiftGrey(obj);
+ heap_->mark_compact_collector()->marking_deque()->Unshift(obj);
}
void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
Marking::WhiteToGrey(mark_bit);
- heap_->mark_compact_collector()->marking_deque()->PushGrey(obj);
+ heap_->mark_compact_collector()->marking_deque()->Push(obj);
}
}
} // namespace v8::internal
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 58eb0aa409..9549a148b4 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/heap/incremental-marking.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
+#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
@@ -52,8 +51,7 @@ void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
MarkBit obj_bit = Marking::MarkBitFrom(obj);
if (Marking::IsBlack(obj_bit)) {
// Object is not going to be rescanned we need to record the slot.
- heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
- slot, value);
+ heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
}
}
}
@@ -104,7 +102,7 @@ void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
if (BaseRecordWrite(host, slot, value)) {
DCHECK(slot != NULL);
heap_->mark_compact_collector()->RecordCodeEntrySlot(
- reinterpret_cast<Address>(slot), value);
+ host, reinterpret_cast<Address>(slot), value);
}
}
@@ -139,8 +137,7 @@ static void MarkObjectGreyDoNotEnqueue(Object* obj) {
HeapObject* heap_obj = HeapObject::cast(obj);
MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
if (Marking::IsBlack(mark_bit)) {
- MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
- -heap_obj->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
}
Marking::AnyToGrey(mark_bit);
}
@@ -152,7 +149,7 @@ static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
DCHECK(!Marking::IsImpossible(mark_bit));
if (Marking::IsBlack(mark_bit)) return;
Marking::MarkBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
+ MemoryChunk::IncrementLiveBytesFromGC(heap_object, size);
}
@@ -190,9 +187,8 @@ class IncrementalMarkingMarkingVisitor
int already_scanned_offset = start_offset;
bool scan_until_end = false;
do {
- VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
- HeapObject::RawField(object, start_offset),
- HeapObject::RawField(object, end_offset));
+ VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
+ HeapObject::RawField(object, end_offset));
start_offset = end_offset;
end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
scan_until_end =
@@ -201,10 +197,10 @@ class IncrementalMarkingMarkingVisitor
chunk->set_progress_bar(start_offset);
if (start_offset < object_size) {
if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
- heap->mark_compact_collector()->marking_deque()->UnshiftGrey(object);
+ heap->mark_compact_collector()->marking_deque()->Unshift(object);
} else {
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
- heap->mark_compact_collector()->marking_deque()->UnshiftBlack(object);
+ heap->mark_compact_collector()->UnshiftBlack(object);
}
heap->incremental_marking()->NotifyIncompleteScanOfObject(
object_size - (start_offset - already_scanned_offset));
@@ -227,31 +223,21 @@ class IncrementalMarkingMarkingVisitor
VisitNativeContext(map, context);
}
- INLINE(static void VisitPointer(Heap* heap, Object** p)) {
- Object* obj = *p;
- if (obj->IsHeapObject()) {
- heap->mark_compact_collector()->RecordSlot(p, p, obj);
- MarkObject(heap, obj);
- }
- }
-
- INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
- for (Object** p = start; p < end; p++) {
- Object* obj = *p;
- if (obj->IsHeapObject()) {
- heap->mark_compact_collector()->RecordSlot(start, p, obj);
- MarkObject(heap, obj);
- }
+ INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
+ Object* target = *p;
+ if (target->IsHeapObject()) {
+ heap->mark_compact_collector()->RecordSlot(object, p, target);
+ MarkObject(heap, target);
}
}
- INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
- Object** start, Object** end)) {
+ INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
+ Object** start, Object** end)) {
for (Object** p = start; p < end; p++) {
- Object* obj = *p;
- if (obj->IsHeapObject()) {
- heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
- MarkObject(heap, obj);
+ Object* target = *p;
+ if (target->IsHeapObject()) {
+ heap->mark_compact_collector()->RecordSlot(object, p, target);
+ MarkObject(heap, target);
}
}
}
@@ -268,8 +254,7 @@ class IncrementalMarkingMarkingVisitor
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (Marking::IsWhite(mark_bit)) {
Marking::MarkBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
- heap_object->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
return true;
}
return false;
@@ -425,8 +410,7 @@ bool IncrementalMarking::CanBeActivated() {
// marking is turned on, 2) when we are currently not in a GC, and
// 3) when we are currently not serializing or deserializing the heap.
// Don't switch on for very small heaps.
- return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
- heap_->gc_state() == Heap::NOT_IN_GC &&
+ return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
heap_->deserialization_complete() &&
!heap_->isolate()->serializer_enabled() &&
heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
@@ -484,7 +468,7 @@ static void PatchIncrementalMarkingRecordWriteStubs(
}
-void IncrementalMarking::Start(int mark_compact_flags,
+void IncrementalMarking::Start(int flags,
const GCCallbackFlags gc_callback_flags,
const char* reason) {
if (FLAG_trace_incremental_marking) {
@@ -492,7 +476,6 @@ void IncrementalMarking::Start(int mark_compact_flags,
(reason == nullptr) ? "unknown reason" : reason);
}
DCHECK(FLAG_incremental_marking);
- DCHECK(FLAG_incremental_marking_steps);
DCHECK(state_ == STOPPED);
DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
DCHECK(!heap_->isolate()->serializer_enabled());
@@ -503,9 +486,8 @@ void IncrementalMarking::Start(int mark_compact_flags,
was_activated_ = true;
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
- heap_->mark_compact_collector()->SetFlags(mark_compact_flags);
+ heap_->set_current_gc_flags(flags);
StartMarking();
- heap_->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
} else {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start sweeping.\n");
@@ -736,7 +718,7 @@ void IncrementalMarking::Hurry() {
if (FLAG_cleanup_code_caches_at_gc) {
PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
- MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
+ MemoryChunk::IncrementLiveBytesFromGC(poly_cache,
PolymorphicCodeCache::kSize);
}
@@ -750,7 +732,7 @@ void IncrementalMarking::Hurry() {
MarkBit mark_bit = Marking::MarkBitFrom(cache);
if (Marking::IsGrey(mark_bit)) {
Marking::GreyToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
}
}
context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
@@ -758,10 +740,10 @@ void IncrementalMarking::Hurry() {
}
-void IncrementalMarking::Abort() {
+void IncrementalMarking::Stop() {
if (IsStopped()) return;
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Aborting.\n");
+ PrintF("[IncrementalMarking] Stopping.\n");
}
heap_->new_space()->LowerInlineAllocationLimit(0);
IncrementalMarking::set_should_hurry(false);
@@ -926,7 +908,6 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
ForceMarkingAction marking,
ForceCompletionAction completion) {
if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
- !FLAG_incremental_marking_steps ||
(state_ != SWEEPING && state_ != MARKING)) {
return 0;
}
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 706e332327..fcada78f0b 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -5,7 +5,6 @@
#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_
-
#include "src/execution.h"
#include "src/heap/mark-compact.h"
#include "src/objects.h"
@@ -82,12 +81,10 @@ class IncrementalMarking {
bool WasActivated();
- void Start(int mark_compact_flags,
+ void Start(int flags,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags,
const char* reason = nullptr);
- void Stop();
-
void MarkObjectGroups();
void UpdateMarkingDequeAfterScavenge();
@@ -96,7 +93,7 @@ class IncrementalMarking {
void Finalize();
- void Abort();
+ void Stop();
void OverApproximateWeakClosure(CompletionAction action);
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 1ac9a5973a..6372c2eeea 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -8,24 +8,25 @@
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
-
namespace v8 {
namespace internal {
-
-MarkBit Marking::MarkBitFrom(Address addr) {
- MemoryChunk* p = MemoryChunk::FromAddress(addr);
- return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
+void MarkCompactCollector::PushBlack(HeapObject* obj) {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(obj)));
+ if (marking_deque_.Push(obj)) {
+ MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
+ } else {
+ Marking::BlackToGrey(obj);
+ }
}
-void MarkCompactCollector::SetFlags(int flags) {
- reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
- abort_incremental_marking_ =
- ((flags & Heap::kAbortIncrementalMarkingMask) != 0);
- finalize_incremental_marking_ =
- ((flags & Heap::kFinalizeIncrementalMarkingMask) != 0);
- DCHECK(!finalize_incremental_marking_ || !abort_incremental_marking_);
+void MarkCompactCollector::UnshiftBlack(HeapObject* obj) {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(obj)));
+ if (!marking_deque_.Unshift(obj)) {
+ MemoryChunk::IncrementLiveBytesFromGC(obj, -obj->Size());
+ Marking::BlackToGrey(obj);
+ }
}
@@ -33,9 +34,8 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
if (Marking::IsWhite(mark_bit)) {
Marking::WhiteToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
DCHECK(obj->GetIsolate()->heap()->Contains(obj));
- marking_deque_.PushBlack(obj);
+ PushBlack(obj);
}
}
@@ -44,7 +44,7 @@ void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
DCHECK(Marking::IsWhite(mark_bit));
DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
Marking::WhiteToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
}
@@ -55,19 +55,107 @@ bool MarkCompactCollector::IsMarked(Object* obj) {
}
-void MarkCompactCollector::RecordSlot(Object** anchor_slot, Object** slot,
- Object* object,
+void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
+ Object* target,
SlotsBuffer::AdditionMode mode) {
- Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
- if (object_page->IsEvacuationCandidate() &&
- !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
+ Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+ if (target_page->IsEvacuationCandidate() &&
+ !ShouldSkipEvacuationSlotRecording(object)) {
if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
- object_page->slots_buffer_address(), slot, mode)) {
- EvictPopularEvacuationCandidate(object_page);
+ target_page->slots_buffer_address(), slot, mode)) {
+ EvictPopularEvacuationCandidate(target_page);
}
}
}
+
+
+void CodeFlusher::AddCandidate(SharedFunctionInfo* shared_info) {
+ if (GetNextCandidate(shared_info) == NULL) {
+ SetNextCandidate(shared_info, shared_function_info_candidates_head_);
+ shared_function_info_candidates_head_ = shared_info;
+ }
+}
+
+
+void CodeFlusher::AddCandidate(JSFunction* function) {
+ DCHECK(function->code() == function->shared()->code());
+ if (GetNextCandidate(function)->IsUndefined()) {
+ SetNextCandidate(function, jsfunction_candidates_head_);
+ jsfunction_candidates_head_ = function;
+ }
+}
+
+
+void CodeFlusher::AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+ if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
+ SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
+ optimized_code_map_holder_head_ = code_map_holder;
+ }
+}
+
+
+JSFunction** CodeFlusher::GetNextCandidateSlot(JSFunction* candidate) {
+ return reinterpret_cast<JSFunction**>(
+ HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
+}
+
+
+JSFunction* CodeFlusher::GetNextCandidate(JSFunction* candidate) {
+ Object* next_candidate = candidate->next_function_link();
+ return reinterpret_cast<JSFunction*>(next_candidate);
+}
+
+
+void CodeFlusher::SetNextCandidate(JSFunction* candidate,
+ JSFunction* next_candidate) {
+ candidate->set_next_function_link(next_candidate, UPDATE_WEAK_WRITE_BARRIER);
+}
+
+
+void CodeFlusher::ClearNextCandidate(JSFunction* candidate, Object* undefined) {
+ DCHECK(undefined->IsUndefined());
+ candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
+}
+
+
+SharedFunctionInfo* CodeFlusher::GetNextCandidate(
+ SharedFunctionInfo* candidate) {
+ Object* next_candidate = candidate->code()->gc_metadata();
+ return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
+}
+
+
+void CodeFlusher::SetNextCandidate(SharedFunctionInfo* candidate,
+ SharedFunctionInfo* next_candidate) {
+ candidate->code()->set_gc_metadata(next_candidate);
}
-} // namespace v8::internal
+
+
+void CodeFlusher::ClearNextCandidate(SharedFunctionInfo* candidate) {
+ candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
+}
+
+
+SharedFunctionInfo* CodeFlusher::GetNextCodeMap(SharedFunctionInfo* holder) {
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
+ return reinterpret_cast<SharedFunctionInfo*>(next_map);
+}
+
+
+void CodeFlusher::SetNextCodeMap(SharedFunctionInfo* holder,
+ SharedFunctionInfo* next_holder) {
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
+}
+
+
+void CodeFlusher::ClearNextCodeMap(SharedFunctionInfo* holder) {
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
+}
+
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_MARK_COMPACT_INL_H_
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 45f5af3f4d..57adf58e6e 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/heap/mark-compact.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
@@ -11,16 +11,18 @@
#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
+#include "src/frames-inl.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
#include "src/heap/incremental-marking.h"
-#include "src/heap/mark-compact.h"
+#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/heap-profiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -40,9 +42,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
#ifdef DEBUG
state_(IDLE),
#endif
- reduce_memory_footprint_(false),
- abort_incremental_marking_(false),
- finalize_incremental_marking_(false),
marking_parity_(ODD_MARKING_PARITY),
compacting_(false),
was_marked_incrementally_(false),
@@ -169,7 +168,7 @@ class VerifyEvacuationVisitor : public ObjectVisitor {
static void VerifyEvacuation(Page* page) {
VerifyEvacuationVisitor visitor;
- HeapObjectIterator iterator(page, NULL);
+ HeapObjectIterator iterator(page);
for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
heap_object = iterator.Next()) {
// We skip free space objects.
@@ -226,6 +225,8 @@ static void VerifyEvacuation(Heap* heap) {
void MarkCompactCollector::SetUp() {
free_list_old_space_.Reset(new FreeList(heap_->old_space()));
+ free_list_code_space_.Reset(new FreeList(heap_->code_space()));
+ free_list_map_space_.Reset(new FreeList(heap_->map_space()));
EnsureMarkingDequeIsReserved();
EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
}
@@ -280,26 +281,13 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
}
-void MarkCompactCollector::ClearInvalidSlotsBufferEntries(PagedSpace* space) {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
- SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
- }
-}
-
-
void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
heap_->store_buffer()->ClearInvalidStoreBufferEntries();
- ClearInvalidSlotsBufferEntries(heap_->old_space());
- ClearInvalidSlotsBufferEntries(heap_->code_space());
- ClearInvalidSlotsBufferEntries(heap_->map_space());
-
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- SlotsBuffer::RemoveInvalidSlots(heap_, chunk->slots_buffer());
+ int number_of_pages = evacuation_candidates_.length();
+ for (int i = 0; i < number_of_pages; i++) {
+ Page* p = evacuation_candidates_[i];
+ SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
}
}
@@ -366,13 +354,6 @@ void MarkCompactCollector::CollectGarbage() {
SweepSpaces();
-#ifdef VERIFY_HEAP
- VerifyWeakEmbeddedObjectsInCode();
- if (FLAG_omit_map_checks_for_leaf_maps) {
- VerifyOmittedMapChecks();
- }
-#endif
-
Finish();
if (marking_parity_ == EVEN_MARKING_PARITY) {
@@ -499,9 +480,31 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
void MarkCompactCollector::StartSweeperThreads() {
DCHECK(free_list_old_space_.get()->IsEmpty());
+ DCHECK(free_list_code_space_.get()->IsEmpty());
+ DCHECK(free_list_map_space_.get()->IsEmpty());
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new SweeperTask(heap(), heap()->old_space()),
v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new SweeperTask(heap(), heap()->code_space()),
+ v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new SweeperTask(heap(), heap()->map_space()),
+ v8::Platform::kShortRunningTask);
+}
+
+
+void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
+ PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
+ if (!page->SweepingCompleted()) {
+ SweepInParallel(page, owner);
+ if (!page->SweepingCompleted()) {
+ // We were not able to sweep that page, i.e., a concurrent
+ // sweeper thread currently owns this page. Wait for the sweeper
+ // thread to be done with this page.
+ page->WaitUntilSweepingCompleted();
+ }
+ }
}
@@ -512,15 +515,23 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
// here.
if (!heap()->concurrent_sweeping_enabled() || !IsSweepingCompleted()) {
SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
+ SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
+ SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
}
// Wait twice for both jobs.
if (heap()->concurrent_sweeping_enabled()) {
pending_sweeper_jobs_semaphore_.Wait();
+ pending_sweeper_jobs_semaphore_.Wait();
+ pending_sweeper_jobs_semaphore_.Wait();
}
ParallelSweepSpacesComplete();
sweeping_in_progress_ = false;
RefillFreeList(heap()->paged_space(OLD_SPACE));
+ RefillFreeList(heap()->paged_space(CODE_SPACE));
+ RefillFreeList(heap()->paged_space(MAP_SPACE));
heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes();
+ heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes();
+ heap()->paged_space(MAP_SPACE)->ResetUnsweptFreeBytes();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !evacuation()) {
@@ -545,6 +556,10 @@ void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
if (space == heap()->old_space()) {
free_list = free_list_old_space_.get();
+ } else if (space == heap()->code_space()) {
+ free_list = free_list_code_space_.get();
+ } else if (space == heap()->map_space()) {
+ free_list = free_list_map_space_.get();
} else {
// Any PagedSpace might invoke RefillFreeLists, so we need to make sure
// to only refill them for the old space.
@@ -557,12 +572,12 @@ void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
}
-void Marking::TransferMark(Address old_start, Address new_start) {
+void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
// This is only used when resizing an object.
DCHECK(MemoryChunk::FromAddress(old_start) ==
MemoryChunk::FromAddress(new_start));
- if (!heap_->incremental_marking()->IsMarking()) return;
+ if (!heap->incremental_marking()->IsMarking()) return;
// If the mark doesn't move, we don't check the color of the object.
// It doesn't matter whether the object is black, since it hasn't changed
@@ -582,9 +597,9 @@ void Marking::TransferMark(Address old_start, Address new_start) {
return;
} else if (Marking::IsGrey(old_mark_bit)) {
Marking::GreyToWhite(old_mark_bit);
- heap_->incremental_marking()->WhiteToGreyAndPush(
+ heap->incremental_marking()->WhiteToGreyAndPush(
HeapObject::FromAddress(new_start), new_mark_bit);
- heap_->incremental_marking()->RestartIfNotMarking();
+ heap->incremental_marking()->RestartIfNotMarking();
}
#ifdef DEBUG
@@ -648,7 +663,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
int total_live_bytes = 0;
bool reduce_memory =
- reduce_memory_footprint_ || heap()->HasLowAllocationRate();
+ heap()->ShouldReduceMemory() || heap()->HasLowAllocationRate();
if (FLAG_manual_evacuation_candidates_selection) {
for (size_t i = 0; i < pages.size(); i++) {
Page* p = pages[i].second;
@@ -770,8 +785,8 @@ void MarkCompactCollector::Prepare() {
}
// Clear marking bits if incremental marking is aborted.
- if (was_marked_incrementally_ && abort_incremental_marking_) {
- heap()->incremental_marking()->Abort();
+ if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
+ heap()->incremental_marking()->Stop();
ClearMarkbits();
AbortWeakCollections();
AbortWeakCells();
@@ -883,13 +898,13 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
// setter did not record the slot update and we have to do that manually.
Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
- isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot,
- target);
+ isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
+ candidate, slot, target);
Object** shared_code_slot =
HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
isolate_->heap()->mark_compact_collector()->RecordSlot(
- shared_code_slot, shared_code_slot, *shared_code_slot);
+ shared, shared_code_slot, *shared_code_slot);
candidate = next_candidate;
}
@@ -924,7 +939,7 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
Object** code_slot =
HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
- isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot,
+ isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
*code_slot);
candidate = next_candidate;
@@ -974,15 +989,15 @@ void CodeFlusher::ProcessOptimizedCodeMaps() {
Object** code_slot = code_map->RawFieldOfElementAt(
new_length + SharedFunctionInfo::kCachedCodeOffset);
isolate_->heap()->mark_compact_collector()->RecordSlot(
- code_slot, code_slot, *code_slot);
+ code_map, code_slot, *code_slot);
Object** context_slot = code_map->RawFieldOfElementAt(
new_length + SharedFunctionInfo::kContextOffset);
isolate_->heap()->mark_compact_collector()->RecordSlot(
- context_slot, context_slot, *context_slot);
+ code_map, context_slot, *context_slot);
Object** literals_slot = code_map->RawFieldOfElementAt(
new_length + SharedFunctionInfo::kLiteralsOffset);
isolate_->heap()->mark_compact_collector()->RecordSlot(
- literals_slot, literals_slot, *literals_slot);
+ code_map, literals_slot, *literals_slot);
new_length += SharedFunctionInfo::kEntryLength;
}
@@ -996,7 +1011,7 @@ void CodeFlusher::ProcessOptimizedCodeMaps() {
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(shared_code)));
Object** slot =
code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex);
- isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
+ isolate_->heap()->mark_compact_collector()->RecordSlot(code_map, slot,
*slot);
}
}
@@ -1180,41 +1195,6 @@ MarkCompactCollector::~MarkCompactCollector() {
}
-static inline HeapObject* ShortCircuitConsString(Object** p) {
- // Optimization: If the heap object pointed to by p is a non-internalized
- // cons string whose right substring is HEAP->empty_string, update
- // it in place to its left substring. Return the updated value.
- //
- // Here we assume that if we change *p, we replace it with a heap object
- // (i.e., the left substring of a cons string is always a heap object).
- //
- // The check performed is:
- // object->IsConsString() && !object->IsInternalizedString() &&
- // (ConsString::cast(object)->second() == HEAP->empty_string())
- // except the maps for the object and its possible substrings might be
- // marked.
- HeapObject* object = HeapObject::cast(*p);
- Map* map = object->map();
- InstanceType type = map->instance_type();
- if (!IsShortcutCandidate(type)) return object;
-
- Object* second = reinterpret_cast<ConsString*>(object)->second();
- Heap* heap = map->GetHeap();
- if (second != heap->empty_string()) {
- return object;
- }
-
- // Since we don't have the object's start, it is impossible to update the
- // page dirty marks. Therefore, we only replace the string with its left
- // substring when page dirty marks do not change.
- Object* first = reinterpret_cast<ConsString*>(object)->first();
- if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
-
- *p = first;
- return HeapObject::cast(first);
-}
-
-
class MarkCompactMarkingVisitor
: public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
public:
@@ -1233,20 +1213,21 @@ class MarkCompactMarkingVisitor
static void Initialize();
- INLINE(static void VisitPointer(Heap* heap, Object** p)) {
- MarkObjectByPointer(heap->mark_compact_collector(), p, p);
+ INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
+ MarkObjectByPointer(heap->mark_compact_collector(), object, p);
}
- INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+ INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
+ Object** start, Object** end)) {
// Mark all objects pointed to in [start, end).
const int kMinRangeForMarkingRecursion = 64;
if (end - start >= kMinRangeForMarkingRecursion) {
- if (VisitUnmarkedObjects(heap, start, end)) return;
+ if (VisitUnmarkedObjects(heap, object, start, end)) return;
// We are close to a stack overflow, so just mark the objects.
}
MarkCompactCollector* collector = heap->mark_compact_collector();
for (Object** p = start; p < end; p++) {
- MarkObjectByPointer(collector, start, p);
+ MarkObjectByPointer(collector, object, p);
}
}
@@ -1269,12 +1250,12 @@ class MarkCompactMarkingVisitor
// Mark object pointed to by p.
INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
- Object** anchor_slot, Object** p)) {
+ HeapObject* object, Object** p)) {
if (!(*p)->IsHeapObject()) return;
- HeapObject* object = ShortCircuitConsString(p);
- collector->RecordSlot(anchor_slot, p, object);
- MarkBit mark = Marking::MarkBitFrom(object);
- collector->MarkObject(object, mark);
+ HeapObject* target_object = HeapObject::cast(*p);
+ collector->RecordSlot(object, p, target_object);
+ MarkBit mark = Marking::MarkBitFrom(target_object);
+ collector->MarkObject(target_object, mark);
}
@@ -1297,8 +1278,8 @@ class MarkCompactMarkingVisitor
// Visit all unmarked objects pointed to by [start, end).
// Returns false if the operation fails (lack of stack space).
- INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start,
- Object** end)) {
+ INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
+ Object** start, Object** end)) {
// Return false is we are close to the stack limit.
StackLimitCheck check(heap->isolate());
if (check.HasOverflowed()) return false;
@@ -1308,7 +1289,7 @@ class MarkCompactMarkingVisitor
for (Object** p = start; p < end; p++) {
Object* o = *p;
if (!o->IsHeapObject()) continue;
- collector->RecordSlot(start, p, o);
+ collector->RecordSlot(object, p, o);
HeapObject* obj = HeapObject::cast(o);
MarkBit mark = Marking::MarkBitFrom(obj);
if (Marking::IsBlackOrGrey(mark)) continue;
@@ -1349,11 +1330,11 @@ class MarkCompactMarkingVisitor
FixedArray* data = FixedArray::cast(re->data());
Object** slot =
data->data_start() + JSRegExp::saved_code_index(is_one_byte);
- heap->mark_compact_collector()->RecordSlot(slot, slot, code);
+ heap->mark_compact_collector()->RecordSlot(data, slot, code);
// Set a number in the 0-255 range to guarantee no smi overflow.
re->SetDataAt(JSRegExp::code_index(is_one_byte),
- Smi::FromInt(heap->sweep_generation() & 0xff));
+ Smi::FromInt(heap->ms_count() & 0xff));
} else if (code->IsSmi()) {
int value = Smi::cast(code)->value();
// The regexp has not been compiled yet or there was a compilation error.
@@ -1363,7 +1344,7 @@ class MarkCompactMarkingVisitor
}
// Check if we should flush now.
- if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
+ if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
re->SetDataAt(JSRegExp::code_index(is_one_byte),
Smi::FromInt(JSRegExp::kUninitializedValue));
re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
@@ -1649,7 +1630,7 @@ class RootMarkingVisitor : public ObjectVisitor {
if (!(*p)->IsHeapObject()) return;
// Replace flat cons strings in place.
- HeapObject* object = ShortCircuitConsString(p);
+ HeapObject* object = HeapObject::cast(*p);
MarkBit mark_bit = Marking::MarkBitFrom(object);
if (Marking::IsBlackOrGrey(mark_bit)) return;
@@ -1737,21 +1718,18 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template <class T>
-static void DiscoverGreyObjectsWithIterator(Heap* heap,
- MarkingDeque* marking_deque,
- T* it) {
+void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
// The caller should ensure that the marking stack is initially not full,
// so that we don't waste effort pointlessly scanning for objects.
- DCHECK(!marking_deque->IsFull());
+ DCHECK(!marking_deque()->IsFull());
- Map* filler_map = heap->one_pointer_filler_map();
+ Map* filler_map = heap()->one_pointer_filler_map();
for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
MarkBit markbit = Marking::MarkBitFrom(object);
if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
Marking::GreyToBlack(markbit);
- MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
- marking_deque->PushBlack(object);
- if (marking_deque->IsFull()) return;
+ PushBlack(object);
+ if (marking_deque()->IsFull()) return;
}
}
}
@@ -1760,9 +1738,8 @@ static void DiscoverGreyObjectsWithIterator(Heap* heap,
static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
-static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
- MemoryChunk* p) {
- DCHECK(!marking_deque->IsFull());
+void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
+ DCHECK(!marking_deque()->IsFull());
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
@@ -1794,9 +1771,8 @@ static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
Marking::GreyToBlack(markbit);
Address addr = cell_base + offset * kPointerSize;
HeapObject* object = HeapObject::FromAddress(addr);
- MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
- marking_deque->PushBlack(object);
- if (marking_deque->IsFull()) return;
+ PushBlack(object);
+ if (marking_deque()->IsFull()) return;
offset += 2;
grey_objects >>= 2;
}
@@ -1869,25 +1845,23 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
}
-static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque,
- PagedSpace* space) {
+void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
PageIterator it(space);
while (it.has_next()) {
Page* p = it.next();
- DiscoverGreyObjectsOnPage(marking_deque, p);
- if (marking_deque->IsFull()) return;
+ DiscoverGreyObjectsOnPage(p);
+ if (marking_deque()->IsFull()) return;
}
}
-static void DiscoverGreyObjectsInNewSpace(Heap* heap,
- MarkingDeque* marking_deque) {
- NewSpace* space = heap->new_space();
+void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
+ NewSpace* space = heap()->new_space();
NewSpacePageIterator it(space->bottom(), space->top());
while (it.has_next()) {
NewSpacePage* page = it.next();
- DiscoverGreyObjectsOnPage(marking_deque, page);
- if (marking_deque->IsFull()) return;
+ DiscoverGreyObjectsOnPage(page);
+ if (marking_deque()->IsFull()) return;
}
}
@@ -2012,20 +1986,20 @@ void MarkCompactCollector::RefillMarkingDeque() {
isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
DCHECK(marking_deque_.overflowed());
- DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
+ DiscoverGreyObjectsInNewSpace();
if (marking_deque_.IsFull()) return;
- DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_space());
+ DiscoverGreyObjectsInSpace(heap()->old_space());
if (marking_deque_.IsFull()) return;
- DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space());
+ DiscoverGreyObjectsInSpace(heap()->code_space());
if (marking_deque_.IsFull()) return;
- DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space());
+ DiscoverGreyObjectsInSpace(heap()->map_space());
if (marking_deque_.IsFull()) return;
LargeObjectIterator lo_it(heap()->lo_space());
- DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &lo_it);
+ DiscoverGreyObjectsWithIterator(&lo_it);
if (marking_deque_.IsFull()) return;
marking_deque_.ClearOverflowed();
@@ -2083,7 +2057,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
void MarkCompactCollector::RetainMaps() {
- if (reduce_memory_footprint_ || abort_incremental_marking_ ||
+ if (heap()->ShouldReduceMemory() || heap()->ShouldAbortIncrementalMarking() ||
FLAG_retain_maps_for_n_gc == 0) {
// Do not retain dead maps if flag disables it or there is
// - memory pressure (reduce_memory_footprint_),
@@ -2131,7 +2105,7 @@ void MarkCompactCollector::RetainMaps() {
if (i != new_length) {
retained_maps->Set(new_length, cell);
Object** slot = retained_maps->Slot(new_length);
- RecordSlot(slot, slot, cell);
+ RecordSlot(retained_maps, slot, cell);
retained_maps->Set(new_length + 1, Smi::FromInt(new_age));
} else if (new_age != age) {
retained_maps->Set(new_length + 1, Smi::FromInt(new_age));
@@ -2244,7 +2218,7 @@ void MarkCompactCollector::MarkLiveObjects() {
incremental_marking->Finalize();
} else {
// Abort any pending incremental activities e.g. incremental sweeping.
- incremental_marking->Abort();
+ incremental_marking->Stop();
if (marking_deque_.in_use()) {
marking_deque_.Uninitialize(true);
}
@@ -2401,7 +2375,7 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
prototype_transitions->set(header + new_number_of_transitions, cell);
Object** slot = prototype_transitions->RawFieldOfElementAt(
header + new_number_of_transitions);
- RecordSlot(slot, slot, cell);
+ RecordSlot(prototype_transitions, slot, cell);
}
new_number_of_transitions++;
}
@@ -2484,7 +2458,7 @@ void MarkCompactCollector::ClearMapTransitions(Map* map, Map* dead_transition) {
Name* key = t->GetKey(i);
t->SetKey(transition_index, key);
Object** key_slot = t->GetKeySlot(transition_index);
- RecordSlot(key_slot, key_slot, key);
+ RecordSlot(t, key_slot, key);
// Target slots do not need to be recorded since maps are not compacted.
t->SetTarget(transition_index, t->GetTarget(i));
}
@@ -2580,15 +2554,14 @@ void MarkCompactCollector::ProcessWeakCollections() {
DCHECK(MarkCompactCollector::IsMarked(weak_collection));
if (weak_collection->table()->IsHashTable()) {
ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
- Object** anchor = reinterpret_cast<Object**>(table->address());
for (int i = 0; i < table->Capacity(); i++) {
if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
Object** key_slot =
table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
- RecordSlot(anchor, key_slot, *key_slot);
+ RecordSlot(table, key_slot, *key_slot);
Object** value_slot =
table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
- MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor,
+ MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
value_slot);
}
}
@@ -2657,9 +2630,9 @@ void MarkCompactCollector::ProcessAndClearWeakCells() {
MarkBit mark = Marking::MarkBitFrom(value);
SetMark(value, mark);
Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
- RecordSlot(slot, slot, *slot);
+ RecordSlot(value, slot, *slot);
slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- RecordSlot(slot, slot, *slot);
+ RecordSlot(weak_cell, slot, *slot);
} else {
weak_cell->clear();
}
@@ -2668,7 +2641,7 @@ void MarkCompactCollector::ProcessAndClearWeakCells() {
}
} else {
Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- RecordSlot(slot, slot, *slot);
+ RecordSlot(weak_cell, slot, *slot);
}
weak_cell_obj = weak_cell->next();
weak_cell->clear_next(heap());
@@ -2782,28 +2755,6 @@ void MarkCompactCollector::MigrateObjectMixed(HeapObject* dst, HeapObject* src,
Address base_pointer_slot =
dst->address() + FixedTypedArrayBase::kBasePointerOffset;
RecordMigratedSlot(Memory::Object_at(base_pointer_slot), base_pointer_slot);
- } else if (src->IsJSArrayBuffer()) {
- heap()->MoveBlock(dst->address(), src->address(), size);
-
- // Visit inherited JSObject properties and byte length of ArrayBuffer
- Address regular_slot =
- dst->address() + JSArrayBuffer::BodyDescriptor::kStartOffset;
- Address regular_slots_end =
- dst->address() + JSArrayBuffer::kByteLengthOffset + kPointerSize;
- while (regular_slot < regular_slots_end) {
- RecordMigratedSlot(Memory::Object_at(regular_slot), regular_slot);
- regular_slot += kPointerSize;
- }
-
- // Skip backing store and visit just internal fields
- Address internal_field_slot = dst->address() + JSArrayBuffer::kSize;
- Address internal_fields_end =
- dst->address() + JSArrayBuffer::kSizeWithInternalFields;
- while (internal_field_slot < internal_fields_end) {
- RecordMigratedSlot(Memory::Object_at(internal_field_slot),
- internal_field_slot);
- internal_field_slot += kPointerSize;
- }
} else if (FLAG_unbox_double_fields) {
Address dst_addr = dst->address();
Address src_addr = src->address();
@@ -2891,13 +2842,12 @@ class PointersUpdatingVisitor : public ObjectVisitor {
}
void VisitDebugTarget(RelocInfo* rinfo) {
- DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence());
+ Object* target =
+ Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
VisitPointer(&target);
- rinfo->set_call_address(Code::cast(target)->instruction_start());
+ rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
}
static inline void UpdateSlot(Heap* heap, Object** slot) {
@@ -3228,12 +3178,6 @@ bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
if (object->IsFixedTypedArrayBase()) {
return static_cast<int>(slot - object->address()) ==
FixedTypedArrayBase::kBasePointerOffset;
- } else if (object->IsJSArrayBuffer()) {
- int off = static_cast<int>(slot - object->address());
- return (off >= JSArrayBuffer::BodyDescriptor::kStartOffset &&
- off <= JSArrayBuffer::kByteLengthOffset) ||
- (off >= JSArrayBuffer::kSize &&
- off < JSArrayBuffer::kSizeWithInternalFields);
} else if (FLAG_unbox_double_fields) {
// Filter out slots that happen to point to unboxed double fields.
LayoutDescriptorHelper helper(object->map());
@@ -3260,23 +3204,6 @@ void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
}
-void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
- Address end_slot) {
- // Remove entries by replacing them with an old-space slot containing a smi
- // that is located in an unmovable page.
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
- DCHECK(p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
- if (p->IsEvacuationCandidate()) {
- SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
- end_slot);
- }
- }
-}
-
-
void MarkCompactCollector::EvacuateNewSpace() {
// There are soft limits in the allocation code, designed trigger a mark
// sweep collection by failing allocations. But since we are already in
@@ -3459,15 +3386,10 @@ static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
break;
}
case SlotsBuffer::DEBUG_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
+ RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0, NULL);
if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
break;
}
- case SlotsBuffer::JS_RETURN_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
- if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
- break;
- }
case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
rinfo.Visit(isolate, v);
@@ -3527,14 +3449,17 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
int offsets[16];
+ // If we use the skip list for code space pages, we have to lock the skip
+ // list because it could be accessed concurrently by the runtime or the
+ // deoptimizer.
SkipList* skip_list = p->skip_list();
- int curr_region = -1;
if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
skip_list->Clear();
}
intptr_t freed_bytes = 0;
intptr_t max_freed_bytes = 0;
+ int curr_region = -1;
for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
Address cell_base = it.CurrentCellBase();
@@ -3593,12 +3518,65 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
}
+static bool IsOnInvalidatedCodeObject(Address addr) {
+ // We did not record any slots in large objects thus
+ // we can safely go to the page from the slot address.
+ Page* p = Page::FromAddress(addr);
+
+ // First check owner's identity because old space is swept concurrently or
+ // lazily and might still have non-zero mark-bits on some pages.
+ if (p->owner()->identity() != CODE_SPACE) return false;
+
+ // In code space only bits on evacuation candidates (but we don't record
+ // any slots on them) and under invalidated code objects are non-zero.
+ MarkBit mark_bit =
+ p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
+
+ return Marking::IsBlackOrGrey(mark_bit);
+}
+
+
+void MarkCompactCollector::InvalidateCode(Code* code) {
+ if (heap_->incremental_marking()->IsCompacting() &&
+ !ShouldSkipEvacuationSlotRecording(code)) {
+ DCHECK(compacting_);
+
+ // If the object is white than no slots were recorded on it yet.
+ MarkBit mark_bit = Marking::MarkBitFrom(code);
+ if (Marking::IsWhite(mark_bit)) return;
+
+ // Ignore all slots that might have been recorded in the body of the
+ // deoptimized code object. Assumption: no slots will be recorded for
+ // this object after invalidating it.
+ RemoveObjectSlots(code->instruction_start(),
+ code->address() + code->Size());
+ }
+}
+
+
// Return true if the given code is deoptimized or will be deoptimized.
bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
return code->is_optimized_code() && code->marked_for_deoptimization();
}
+void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
+ Address end_slot) {
+ // Remove entries by replacing them with an old-space slot containing a smi
+ // that is located in an unmovable page.
+ int npages = evacuation_candidates_.length();
+ for (int i = 0; i < npages; i++) {
+ Page* p = evacuation_candidates_[i];
+ DCHECK(p->IsEvacuationCandidate() ||
+ p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+ if (p->IsEvacuationCandidate()) {
+ SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
+ end_slot);
+ }
+ }
+}
+
+
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
Heap::RelocationLock relocation_lock(heap());
@@ -3623,8 +3601,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
// Update pointers in to space.
- SemiSpaceIterator to_it(heap()->new_space()->bottom(),
- heap()->new_space()->top());
+ SemiSpaceIterator to_it(heap()->new_space());
for (HeapObject* object = to_it.Next(); object != NULL;
object = to_it.Next()) {
Map* map = object->map();
@@ -3658,6 +3635,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
if (compacting_ && was_marked_incrementally_) {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_RESCAN_LARGE_OBJECTS);
// It's difficult to filter out slots recorded for large objects.
LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
@@ -4197,12 +4176,33 @@ int MarkCompactCollector::SweepInParallel(PagedSpace* space,
int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
int max_freed = 0;
- if (page->TryParallelSweeping()) {
- FreeList* free_list = free_list_old_space_.get();
+ if (page->TryLock()) {
+ // If this page was already swept in the meantime, we can return here.
+ if (page->parallel_sweeping() != MemoryChunk::SWEEPING_PENDING) {
+ page->mutex()->Unlock();
+ return 0;
+ }
+ page->set_parallel_sweeping(MemoryChunk::SWEEPING_IN_PROGRESS);
+ FreeList* free_list;
FreeList private_free_list(space);
- max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+ if (space->identity() == OLD_SPACE) {
+ free_list = free_list_old_space_.get();
+ max_freed =
+ Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+ } else if (space->identity() == CODE_SPACE) {
+ free_list = free_list_code_space_.get();
+ max_freed =
+ Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+ } else {
+ free_list = free_list_map_space_.get();
+ max_freed =
+ Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+ }
free_list->Concatenate(&private_free_list);
+ page->mutex()->Unlock();
}
return max_freed;
}
@@ -4258,8 +4258,18 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
reinterpret_cast<intptr_t>(p));
}
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+ if (space->identity() == CODE_SPACE) {
+ if (FLAG_zap_code_space) {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ ZAP_FREE_SPACE>(space, NULL, p, NULL);
+ } else {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+ }
+ } else {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+ }
pages_swept++;
parallel_sweeping_active = true;
} else {
@@ -4276,12 +4286,14 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
if (FLAG_gc_verbose) {
PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
}
- if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- ZAP_FREE_SPACE>(space, NULL, p, NULL);
- } else if (space->identity() == CODE_SPACE) {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+ if (space->identity() == CODE_SPACE) {
+ if (FLAG_zap_code_space) {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ ZAP_FREE_SPACE>(space, NULL, p, NULL);
+ } else {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+ }
} else {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, NULL);
@@ -4322,36 +4334,31 @@ void MarkCompactCollector::SweepSpaces() {
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
{
- GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_OLDSPACE);
- { SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING); }
+ {
+ GCTracer::Scope sweep_scope(heap()->tracer(),
+ GCTracer::Scope::MC_SWEEP_OLDSPACE);
+ SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING);
+ }
+ {
+ GCTracer::Scope sweep_scope(heap()->tracer(),
+ GCTracer::Scope::MC_SWEEP_CODE);
+ SweepSpace(heap()->code_space(), CONCURRENT_SWEEPING);
+ }
+ {
+ GCTracer::Scope sweep_scope(heap()->tracer(),
+ GCTracer::Scope::MC_SWEEP_MAP);
+ SweepSpace(heap()->map_space(), CONCURRENT_SWEEPING);
+ }
sweeping_in_progress_ = true;
if (heap()->concurrent_sweeping_enabled()) {
StartSweeperThreads();
}
}
- {
- GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_CODE);
- SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
- }
EvacuateNewSpaceAndCandidates();
- // NOTE: ArrayBuffers must be evacuated first, before freeing them. Otherwise
- // not yet discovered buffers for scavenge will have all of them, and they
- // will be erroneously freed.
heap()->FreeDeadArrayBuffers(false);
- // ClearNonLiveReferences depends on precise sweeping of map space to
- // detect whether unmarked map became dead in this collection or in one
- // of the previous ones.
- {
- GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_MAP);
- SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING);
- }
-
// Deallocate unmarked objects and clear marked bits for marked objects.
heap_->lo_space()->FreeUnmarkedObjects();
@@ -4390,14 +4397,13 @@ void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
void MarkCompactCollector::ParallelSweepSpacesComplete() {
ParallelSweepSpaceComplete(heap()->old_space());
+ ParallelSweepSpaceComplete(heap()->code_space());
+ ParallelSweepSpaceComplete(heap()->map_space());
}
void MarkCompactCollector::EnableCodeFlushing(bool enable) {
- if (isolate()->debug()->is_loaded() ||
- isolate()->debug()->has_break_points()) {
- enable = false;
- }
+ if (isolate()->debug()->is_active()) enable = false;
if (enable) {
if (code_flusher_ != NULL) return;
@@ -4559,8 +4565,6 @@ static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
} else if (RelocInfo::IsDebugBreakSlot(rmode)) {
return SlotsBuffer::DEBUG_TARGET_SLOT;
- } else if (RelocInfo::IsJSReturn(rmode)) {
- return SlotsBuffer::JS_RETURN_SLOT;
}
UNREACHABLE();
return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
@@ -4617,10 +4621,11 @@ void MarkCompactCollector::EvictPopularEvacuationCandidate(Page* page) {
}
-void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
+void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* object, Address slot,
+ Code* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
if (target_page->IsEvacuationCandidate() &&
- !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
+ !ShouldSkipEvacuationSlotRecording(object)) {
if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
target_page->slots_buffer_address(),
SlotsBuffer::CODE_ENTRY_SLOT, slot,
@@ -4669,6 +4674,28 @@ void SlotsBuffer::UpdateSlots(Heap* heap) {
}
+void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
+ PointersUpdatingVisitor v(heap);
+
+ for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+ ObjectSlot slot = slots_[slot_idx];
+ if (!IsTypedSlot(slot)) {
+ if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
+ PointersUpdatingVisitor::UpdateSlot(heap, slot);
+ }
+ } else {
+ ++slot_idx;
+ DCHECK(slot_idx < idx_);
+ Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
+ if (!IsOnInvalidatedCodeObject(pc)) {
+ UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
+ reinterpret_cast<Address>(slots_[slot_idx]));
+ }
+ }
+ }
+}
+
+
SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
return new SlotsBuffer(next_buffer);
}
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 9892e0e42c..843e73d8e7 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -26,11 +26,12 @@ class MarkingVisitor;
class RootMarkingVisitor;
-class Marking {
+class Marking : public AllStatic {
public:
- explicit Marking(Heap* heap) : heap_(heap) {}
-
- INLINE(static MarkBit MarkBitFrom(Address addr));
+ INLINE(static MarkBit MarkBitFrom(Address addr)) {
+ MemoryChunk* p = MemoryChunk::FromAddress(addr);
+ return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
+ }
INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
return MarkBitFrom(reinterpret_cast<Address>(obj));
@@ -116,7 +117,11 @@ class Marking {
markbit.Next().Set();
}
- void TransferMark(Address old_start, Address new_start);
+ static void SetAllMarkBitsInRange(MarkBit start, MarkBit end);
+ static void ClearAllMarkBitsOfCellsContainedInRange(MarkBit start,
+ MarkBit end);
+
+ static void TransferMark(Heap* heap, Address old_start, Address new_start);
#ifdef DEBUG
enum ObjectColor {
@@ -170,7 +175,7 @@ class Marking {
}
private:
- Heap* heap_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
};
// ----------------------------------------------------------------------------
@@ -200,28 +205,17 @@ class MarkingDeque {
void SetOverflowed() { overflowed_ = true; }
- // Push the (marked) object on the marking stack if there is room,
- // otherwise mark the object as overflowed and wait for a rescan of the
- // heap.
- INLINE(void PushBlack(HeapObject* object)) {
- DCHECK(object->IsHeapObject());
- if (IsFull()) {
- Marking::BlackToGrey(object);
- MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
- SetOverflowed();
- } else {
- array_[top_] = object;
- top_ = ((top_ + 1) & mask_);
- }
- }
-
- INLINE(void PushGrey(HeapObject* object)) {
+ // Push the object on the marking stack if there is room, otherwise mark the
+ // deque as overflowed and wait for a rescan of the heap.
+ INLINE(bool Push(HeapObject* object)) {
DCHECK(object->IsHeapObject());
if (IsFull()) {
SetOverflowed();
+ return false;
} else {
array_[top_] = object;
top_ = ((top_ + 1) & mask_);
+ return true;
}
}
@@ -233,26 +227,17 @@ class MarkingDeque {
return object;
}
- INLINE(void UnshiftGrey(HeapObject* object)) {
+ // Unshift the object into the marking stack if there is room, otherwise mark
+ // the deque as overflowed and wait for a rescan of the heap.
+ INLINE(bool Unshift(HeapObject* object)) {
DCHECK(object->IsHeapObject());
if (IsFull()) {
SetOverflowed();
+ return false;
} else {
bottom_ = ((bottom_ - 1) & mask_);
array_[bottom_] = object;
- }
- }
-
- INLINE(void UnshiftBlack(HeapObject* object)) {
- DCHECK(object->IsHeapObject());
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
- if (IsFull()) {
- Marking::BlackToGrey(object);
- MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
- SetOverflowed();
- } else {
- bottom_ = ((bottom_ - 1) & mask_);
- array_[bottom_] = object;
+ return true;
}
}
@@ -335,7 +320,6 @@ class SlotsBuffer {
CODE_TARGET_SLOT,
CODE_ENTRY_SLOT,
DEBUG_TARGET_SLOT,
- JS_RETURN_SLOT,
NUMBER_OF_SLOT_TYPES
};
@@ -355,8 +339,6 @@ class SlotsBuffer {
return "CODE_ENTRY_SLOT";
case DEBUG_TARGET_SLOT:
return "DEBUG_TARGET_SLOT";
- case JS_RETURN_SLOT:
- return "JS_RETURN_SLOT";
case NUMBER_OF_SLOT_TYPES:
return "NUMBER_OF_SLOT_TYPES";
}
@@ -365,6 +347,8 @@ class SlotsBuffer {
void UpdateSlots(Heap* heap);
+ void UpdateSlotsWithFilter(Heap* heap);
+
SlotsBuffer* next() { return next_; }
static int SizeOfChain(SlotsBuffer* buffer) {
@@ -379,7 +363,7 @@ class SlotsBuffer {
static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer) {
while (buffer != NULL) {
- buffer->UpdateSlots(heap);
+ buffer->UpdateSlots(heap);
buffer = buffer->next();
}
}
@@ -455,27 +439,9 @@ class CodeFlusher {
shared_function_info_candidates_head_(NULL),
optimized_code_map_holder_head_(NULL) {}
- void AddCandidate(SharedFunctionInfo* shared_info) {
- if (GetNextCandidate(shared_info) == NULL) {
- SetNextCandidate(shared_info, shared_function_info_candidates_head_);
- shared_function_info_candidates_head_ = shared_info;
- }
- }
-
- void AddCandidate(JSFunction* function) {
- DCHECK(function->code() == function->shared()->code());
- if (GetNextCandidate(function)->IsUndefined()) {
- SetNextCandidate(function, jsfunction_candidates_head_);
- jsfunction_candidates_head_ = function;
- }
- }
-
- void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
- if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
- SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
- optimized_code_map_holder_head_ = code_map_holder;
- }
- }
+ inline void AddCandidate(SharedFunctionInfo* shared_info);
+ inline void AddCandidate(JSFunction* function);
+ inline void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
void EvictCandidate(SharedFunctionInfo* shared_info);
@@ -503,57 +469,23 @@ class CodeFlusher {
void EvictJSFunctionCandidates();
void EvictSharedFunctionInfoCandidates();
- static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
- return reinterpret_cast<JSFunction**>(
- HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
- }
+ static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
+ static inline JSFunction* GetNextCandidate(JSFunction* candidate);
+ static inline void SetNextCandidate(JSFunction* candidate,
+ JSFunction* next_candidate);
+ static inline void ClearNextCandidate(JSFunction* candidate,
+ Object* undefined);
- static JSFunction* GetNextCandidate(JSFunction* candidate) {
- Object* next_candidate = candidate->next_function_link();
- return reinterpret_cast<JSFunction*>(next_candidate);
- }
+ static inline SharedFunctionInfo* GetNextCandidate(
+ SharedFunctionInfo* candidate);
+ static inline void SetNextCandidate(SharedFunctionInfo* candidate,
+ SharedFunctionInfo* next_candidate);
+ static inline void ClearNextCandidate(SharedFunctionInfo* candidate);
- static void SetNextCandidate(JSFunction* candidate,
- JSFunction* next_candidate) {
- candidate->set_next_function_link(next_candidate,
- UPDATE_WEAK_WRITE_BARRIER);
- }
-
- static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
- DCHECK(undefined->IsUndefined());
- candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
- }
-
- static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
- Object* next_candidate = candidate->code()->gc_metadata();
- return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
- }
-
- static void SetNextCandidate(SharedFunctionInfo* candidate,
- SharedFunctionInfo* next_candidate) {
- candidate->code()->set_gc_metadata(next_candidate);
- }
-
- static void ClearNextCandidate(SharedFunctionInfo* candidate) {
- candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
- }
-
- static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
- return reinterpret_cast<SharedFunctionInfo*>(next_map);
- }
-
- static void SetNextCodeMap(SharedFunctionInfo* holder,
- SharedFunctionInfo* next_holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
- }
-
- static void ClearNextCodeMap(SharedFunctionInfo* holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
- }
+ static inline SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder);
+ static inline void SetNextCodeMap(SharedFunctionInfo* holder,
+ SharedFunctionInfo* next_holder);
+ static inline void ClearNextCodeMap(SharedFunctionInfo* holder);
Isolate* isolate_;
JSFunction* jsfunction_candidates_head_;
@@ -572,9 +504,6 @@ class ThreadLocalTop;
// Mark-Compact collector
class MarkCompactCollector {
public:
- // Set the global flags, it must be called before Prepare to take effect.
- inline void SetFlags(int flags);
-
static void Initialize();
void SetUp();
@@ -637,11 +566,6 @@ class MarkCompactCollector {
void VerifyOmittedMapChecks();
#endif
- INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
- return Page::FromAddress(reinterpret_cast<Address>(anchor))
- ->ShouldSkipEvacuationSlotRecording();
- }
-
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
return Page::FromAddress(reinterpret_cast<Address>(host))
->ShouldSkipEvacuationSlotRecording();
@@ -653,11 +577,11 @@ class MarkCompactCollector {
}
void RecordRelocSlot(RelocInfo* rinfo, Object* target);
- void RecordCodeEntrySlot(Address slot, Code* target);
+ void RecordCodeEntrySlot(HeapObject* object, Address slot, Code* target);
void RecordCodeTargetPatch(Address pc, Code* target);
INLINE(void RecordSlot(
- Object** anchor_slot, Object** slot, Object* object,
+ HeapObject* object, Object** slot, Object* target,
SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));
void MigrateObject(HeapObject* dst, HeapObject* src, int size,
@@ -669,13 +593,9 @@ class MarkCompactCollector {
bool TryPromoteObject(HeapObject* object, int object_size);
- void ClearMarkbits();
-
- bool abort_incremental_marking() const { return abort_incremental_marking_; }
+ void InvalidateCode(Code* code);
- bool finalize_incremental_marking() const {
- return finalize_incremental_marking_;
- }
+ void ClearMarkbits();
bool is_compacting() const { return compacting_; }
@@ -694,6 +614,8 @@ class MarkCompactCollector {
void EnsureSweepingCompleted();
+ void SweepOrWaitUntilSweepingCompleted(Page* page);
+
// If sweeper threads are not active this method will return true. If
// this is a latency issue we should be smarter here. Otherwise, it will
// return true if the sweeper threads are done processing the pages.
@@ -753,7 +675,6 @@ class MarkCompactCollector {
bool WillBeDeoptimized(Code* code);
void EvictPopularEvacuationCandidate(Page* page);
- void ClearInvalidSlotsBufferEntries(PagedSpace* space);
void ClearInvalidStoreAndSlotsBufferEntries();
void StartSweeperThreads();
@@ -773,12 +694,6 @@ class MarkCompactCollector {
CollectorState state_;
#endif
- bool reduce_memory_footprint_;
-
- bool abort_incremental_marking_;
-
- bool finalize_incremental_marking_;
-
MarkingParity marking_parity_;
// True if we are collecting slots to perform evacuation from evacuation
@@ -810,11 +725,12 @@ class MarkCompactCollector {
//
// After: Live objects are marked and non-live objects are unmarked.
- friend class RootMarkingVisitor;
- friend class MarkingVisitor;
- friend class MarkCompactMarkingVisitor;
friend class CodeMarkingVisitor;
+ friend class MarkCompactMarkingVisitor;
+ friend class MarkingVisitor;
+ friend class RootMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
+ friend class IncrementalMarkingMarkingVisitor;
// Mark code objects that are active on the stack to prevent them
// from being flushed.
@@ -827,6 +743,14 @@ class MarkCompactCollector {
void AfterMarking();
+ // Pushes a black object onto the marking stack and accounts for live bytes.
+ // Note that this assumes live bytes have not yet been counted.
+ INLINE(void PushBlack(HeapObject* obj));
+
+ // Unshifts a black object into the marking stack and accounts for live bytes.
+ // Note that this assumes live bytes have already been counted.
+ INLINE(void UnshiftBlack(HeapObject* obj));
+
// Marks the object black and pushes it on the marking stack.
// This is for non-incremental marking only.
INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
@@ -875,6 +799,14 @@ class MarkCompactCollector {
// flag on the marking stack.
void RefillMarkingDeque();
+ // Helper methods for refilling the marking stack by discovering grey objects
+ // on various pages of the heap. Used by {RefillMarkingDeque} only.
+ template <class T>
+ void DiscoverGreyObjectsWithIterator(T* it);
+ void DiscoverGreyObjectsOnPage(MemoryChunk* p);
+ void DiscoverGreyObjectsInSpace(PagedSpace* space);
+ void DiscoverGreyObjectsInNewSpace();
+
// Callback function for telling whether the object *p is an unmarked
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
@@ -969,7 +901,9 @@ class MarkCompactCollector {
List<Page*> evacuation_candidates_;
- SmartPointer<FreeList> free_list_old_space_;
+ base::SmartPointer<FreeList> free_list_old_space_;
+ base::SmartPointer<FreeList> free_list_code_space_;
+ base::SmartPointer<FreeList> free_list_map_space_;
friend class Heap;
};
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index df827bae29..25378b5911 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -17,9 +17,12 @@ const int MemoryReducer::kShortDelayMs = 500;
const int MemoryReducer::kWatchdogDelayMs = 100000;
const int MemoryReducer::kMaxNumberOfGCs = 3;
+MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
+ : CancelableTask(memory_reducer->heap()->isolate()),
+ memory_reducer_(memory_reducer) {}
-void MemoryReducer::TimerTask::Run() {
- if (heap_is_torn_down_) return;
+
+void MemoryReducer::TimerTask::RunInternal() {
Heap* heap = memory_reducer_->heap();
Event event;
double time_ms = heap->MonotonicallyIncreasingTimeInMs();
@@ -36,10 +39,8 @@ void MemoryReducer::TimerTask::Run() {
void MemoryReducer::NotifyTimer(const Event& event) {
- DCHECK(nullptr != pending_task_);
DCHECK_EQ(kTimer, event.type);
DCHECK_EQ(kWait, state_.action);
- pending_task_ = nullptr;
state_ = Step(state_, event);
if (state_.action == kRun) {
DCHECK(heap()->incremental_marking()->IsStopped());
@@ -192,27 +193,13 @@ void MemoryReducer::ScheduleTimer(double delay_ms) {
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
- DCHECK(nullptr == pending_task_);
- pending_task_ = new MemoryReducer::TimerTask(this);
+ auto timer_task = new MemoryReducer::TimerTask(this);
V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(
- isolate, pending_task_, (delay_ms + kSlackMs) / 1000.0);
+ isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
}
-void MemoryReducer::ClearTask(v8::Task* task) {
- if (pending_task_ == task) {
- pending_task_ = nullptr;
- }
-}
-
-
-void MemoryReducer::TearDown() {
- if (pending_task_ != nullptr) {
- pending_task_->NotifyHeapTearDown();
- pending_task_ = nullptr;
- }
- state_ = State(kDone, 0, 0, 0.0);
-}
+void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }
} // internal
} // v8
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index c387322172..f98cb045e3 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -7,6 +7,7 @@
#include "include/v8-platform.h"
#include "src/base/macros.h"
+#include "src/cancelable-task.h"
namespace v8 {
namespace internal {
@@ -111,9 +112,8 @@ class MemoryReducer {
};
explicit MemoryReducer(Heap* heap)
- : heap_(heap), state_(kDone, 0, 0.0, 0.0), pending_task_(nullptr) {}
+ : heap_(heap), state_(kDone, 0, 0.0, 0.0) {}
// Callbacks.
- void NotifyTimer(const Event& event);
void NotifyMarkCompact(const Event& event);
void NotifyContextDisposed(const Event& event);
void NotifyBackgroundIdleNotification(const Event& event);
@@ -123,10 +123,6 @@ class MemoryReducer {
// Posts a timer task that will call NotifyTimer after the given delay.
void ScheduleTimer(double delay_ms);
void TearDown();
- void ClearTask(v8::Task* task);
-
- static bool WatchdogGC(const State& state, const Event& event);
-
static const int kLongDelayMs;
static const int kShortDelayMs;
static const int kWatchdogDelayMs;
@@ -139,28 +135,23 @@ class MemoryReducer {
}
private:
- class TimerTask : public v8::Task {
+ class TimerTask : public v8::internal::CancelableTask {
public:
- explicit TimerTask(MemoryReducer* memory_reducer)
- : memory_reducer_(memory_reducer), heap_is_torn_down_(false) {}
- virtual ~TimerTask() {
- if (!heap_is_torn_down_) {
- memory_reducer_->ClearTask(this);
- }
- }
- void NotifyHeapTearDown() { heap_is_torn_down_ = true; }
+ explicit TimerTask(MemoryReducer* memory_reducer);
private:
- // v8::Task overrides.
- void Run() override;
+ // v8::internal::CancelableTask overrides.
+ void RunInternal() override;
MemoryReducer* memory_reducer_;
- bool heap_is_torn_down_;
DISALLOW_COPY_AND_ASSIGN(TimerTask);
};
+
+ void NotifyTimer(const Event& event);
+
+ static bool WatchdogGC(const State& state, const Event& event);
+
Heap* heap_;
State state_;
- TimerTask* pending_task_;
-
DISALLOW_COPY_AND_ASSIGN(MemoryReducer);
};
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index bdb801a1f9..21b770d8c5 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -6,10 +6,19 @@
#define V8_OBJECTS_VISITING_INL_H_
#include "src/heap/objects-visiting.h"
+#include "src/ic/ic-state.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
+
+template <typename Callback>
+Callback VisitorDispatchTable<Callback>::GetVisitor(Map* map) {
+ return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
+}
+
+
template <typename StaticVisitor>
void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(
@@ -42,6 +51,7 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
int>::Visit);
table_.Register(kVisitByteArray, &VisitByteArray);
+ table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
table_.Register(
kVisitSharedFunctionInfo,
@@ -81,8 +91,10 @@ int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
- JSArrayBuffer::JSArrayBufferIterateBody<
- StaticNewSpaceVisitor<StaticVisitor> >(heap, object);
+ VisitPointers(
+ heap, object,
+ HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
if (!JSArrayBuffer::cast(object)->is_external()) {
heap->RegisterLiveArrayBuffer(true,
JSArrayBuffer::cast(object)->backing_store());
@@ -95,7 +107,7 @@ template <typename StaticVisitor>
int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray(
Map* map, HeapObject* object) {
VisitPointers(
- map->GetHeap(),
+ map->GetHeap(), object,
HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
return JSTypedArray::kSizeWithInternalFields;
@@ -106,7 +118,7 @@ template <typename StaticVisitor>
int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView(Map* map,
HeapObject* object) {
VisitPointers(
- map->GetHeap(),
+ map->GetHeap(), object,
HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
return JSDataView::kSizeWithInternalFields;
@@ -145,6 +157,8 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
+ table_.Register(kVisitBytecodeArray, &DataObjectVisitor::Visit);
+
table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit);
@@ -194,9 +208,10 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitCodeEntry(
- Heap* heap, Address entry_address) {
+ Heap* heap, HeapObject* object, Address entry_address) {
Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
- heap->mark_compact_collector()->RecordCodeEntrySlot(entry_address, code);
+ heap->mark_compact_collector()->RecordCodeEntrySlot(object, entry_address,
+ code);
StaticVisitor::MarkObject(heap, code);
}
@@ -231,11 +246,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCell(Heap* heap,
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(Heap* heap,
RelocInfo* rinfo) {
- DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence());
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
StaticVisitor::MarkObject(heap, target);
}
@@ -282,7 +295,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
for (int idx = Context::FIRST_WEAK_SLOT; idx < Context::NATIVE_CONTEXT_SLOTS;
++idx) {
Object** slot = Context::cast(object)->RawFieldOfElementAt(idx);
- collector->RecordSlot(slot, slot, *slot);
+ collector->RecordSlot(object, slot, *slot);
}
}
@@ -304,7 +317,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitMap(Map* map,
MarkMapContents(heap, map_object);
} else {
StaticVisitor::VisitPointers(
- heap, HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
+ heap, object,
+ HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
}
}
@@ -316,7 +330,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell(
Heap* heap = map->GetHeap();
StaticVisitor::VisitPointers(
- heap,
+ heap, object,
HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset),
HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset));
}
@@ -344,7 +358,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
Heap* heap = map->GetHeap();
StaticVisitor::VisitPointers(
- heap,
+ heap, object,
HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset),
HeapObject::RawField(object, AllocationSite::kPointerFieldsEndOffset));
}
@@ -366,7 +380,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
// Skip visiting the backing hash table containing the mappings and the
// pointer to the other enqueued weak collections, both are post-processed.
StaticVisitor::VisitPointers(
- heap, HeapObject::RawField(object, JSWeakCollection::kPropertiesOffset),
+ heap, object,
+ HeapObject::RawField(object, JSWeakCollection::kPropertiesOffset),
HeapObject::RawField(object, JSWeakCollection::kTableOffset));
STATIC_ASSERT(JSWeakCollection::kTableOffset + kPointerSize ==
JSWeakCollection::kNextOffset);
@@ -379,7 +394,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
// Mark the backing hash table without pushing it on the marking stack.
Object** slot = HeapObject::RawField(object, JSWeakCollection::kTableOffset);
HeapObject* obj = HeapObject::cast(*slot);
- heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+ heap->mark_compact_collector()->RecordSlot(object, slot, obj);
StaticVisitor::MarkObjectWithoutPush(heap, obj);
}
@@ -419,10 +434,10 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
// Add the shared function info holding an optimized code map to
// the code flusher for processing of code maps after marking.
collector->code_flusher()->AddOptimizedCodeMap(shared);
- // Treat all references within the code map weakly by marking the
+ // Treat some references within the code map weakly by marking the
// code map itself but not pushing it onto the marking deque.
FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
- StaticVisitor::MarkObjectWithoutPush(heap, code_map);
+ MarkOptimizedCodeMap(heap, code_map);
}
if (IsFlushable(heap, shared)) {
// This function's code looks flushable. But we have to postpone
@@ -489,9 +504,10 @@ template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(Map* map,
HeapObject* object) {
int last_property_offset =
- JSRegExp::kSize + kPointerSize * map->inobject_properties();
+ JSRegExp::kSize + kPointerSize * map->GetInObjectProperties();
StaticVisitor::VisitPointers(
- map->GetHeap(), HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
+ map->GetHeap(), object,
+ HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
HeapObject::RawField(object, last_property_offset));
}
@@ -501,7 +517,10 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
- JSArrayBuffer::JSArrayBufferIterateBody<StaticVisitor>(heap, object);
+ StaticVisitor::VisitPointers(
+ heap, object,
+ HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
if (!JSArrayBuffer::cast(object)->is_external()) {
heap->RegisterLiveArrayBuffer(false,
JSArrayBuffer::cast(object)->backing_store());
@@ -513,7 +532,7 @@ template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray(
Map* map, HeapObject* object) {
StaticVisitor::VisitPointers(
- map->GetHeap(),
+ map->GetHeap(), object,
HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
}
@@ -523,7 +542,7 @@ template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(Map* map,
HeapObject* object) {
StaticVisitor::VisitPointers(
- map->GetHeap(),
+ map->GetHeap(), object,
HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
}
@@ -547,13 +566,14 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
DescriptorArray* descriptors = map->instance_descriptors();
if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
descriptors->length() > 0) {
- StaticVisitor::VisitPointers(heap, descriptors->GetFirstElementAddress(),
+ StaticVisitor::VisitPointers(heap, descriptors,
+ descriptors->GetFirstElementAddress(),
descriptors->GetDescriptorEndSlot(0));
}
int start = 0;
int end = map->NumberOfOwnDescriptors();
if (start < end) {
- StaticVisitor::VisitPointers(heap,
+ StaticVisitor::VisitPointers(heap, descriptors,
descriptors->GetDescriptorStartSlot(start),
descriptors->GetDescriptorEndSlot(end));
}
@@ -563,7 +583,7 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
// been marked already, it is fine that one of these fields contains a
// pointer to it.
StaticVisitor::VisitPointers(
- heap, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
+ heap, map, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
}
@@ -574,13 +594,30 @@ void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray(
if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return;
if (transitions->HasPrototypeTransitions()) {
- StaticVisitor::VisitPointer(heap,
+ StaticVisitor::VisitPointer(heap, transitions,
transitions->GetPrototypeTransitionsSlot());
}
int num_transitions = TransitionArray::NumberOfTransitions(transitions);
for (int i = 0; i < num_transitions; ++i) {
- StaticVisitor::VisitPointer(heap, transitions->GetKeySlot(i));
+ StaticVisitor::VisitPointer(heap, transitions, transitions->GetKeySlot(i));
+ }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::MarkOptimizedCodeMap(
+ Heap* heap, FixedArray* code_map) {
+ if (!StaticVisitor::MarkObjectWithoutPush(heap, code_map)) return;
+
+ // Mark the context-independent entry in the optimized code map. Depending on
+ // the age of the code object, we treat it as a strong or a weak reference.
+ Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
+ if (FLAG_turbo_preserve_shared_code && shared_object->IsCode() &&
+ FLAG_age_code && !Code::cast(shared_object)->IsOld()) {
+ StaticVisitor::VisitPointer(
+ heap, code_map,
+ code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex));
}
}
@@ -711,7 +748,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
object, SharedFunctionInfo::BodyDescriptor::kStartOffset);
Object** end_slot = HeapObject::RawField(
object, SharedFunctionInfo::BodyDescriptor::kEndOffset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+ StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
}
@@ -720,7 +757,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
Heap* heap, HeapObject* object) {
Object** name_slot =
HeapObject::RawField(object, SharedFunctionInfo::kNameOffset);
- StaticVisitor::VisitPointer(heap, name_slot);
+ StaticVisitor::VisitPointer(heap, object, name_slot);
// Skip visiting kCodeOffset as it is treated weakly here.
STATIC_ASSERT(SharedFunctionInfo::kNameOffset + kPointerSize ==
@@ -732,7 +769,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
HeapObject::RawField(object, SharedFunctionInfo::kOptimizedCodeMapOffset);
Object** end_slot = HeapObject::RawField(
object, SharedFunctionInfo::BodyDescriptor::kEndOffset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+ StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
}
@@ -743,16 +780,17 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
HeapObject::RawField(object, JSFunction::kPropertiesOffset);
Object** end_slot =
HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+ StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
- VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
+ VisitCodeEntry(heap, object,
+ object->address() + JSFunction::kCodeEntryOffset);
STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
JSFunction::kPrototypeOrInitialMapOffset);
start_slot =
HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+ StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
}
@@ -763,7 +801,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
HeapObject::RawField(object, JSFunction::kPropertiesOffset);
Object** end_slot =
HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+ StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
// Skip visiting kCodeEntryOffset as it is treated weakly here.
STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
@@ -772,7 +810,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
start_slot =
HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
- StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+ StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
}
@@ -783,9 +821,8 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
- RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::kDebugBreakSlotMask;
// There are two places where we iterate code bodies: here and the
// templated CodeIterateBody (below). They should be kept in sync.
@@ -811,22 +848,22 @@ void Code::CodeIterateBody(Heap* heap) {
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
- RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::kDebugBreakSlotMask;
// There are two places where we iterate code bodies: here and the non-
// templated CodeIterateBody (above). They should be kept in sync.
StaticVisitor::VisitPointer(
- heap,
+ heap, this,
reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
StaticVisitor::VisitPointer(
- heap, reinterpret_cast<Object**>(this->address() + kHandlerTableOffset));
+ heap, this,
+ reinterpret_cast<Object**>(this->address() + kHandlerTableOffset));
StaticVisitor::VisitPointer(
- heap,
+ heap, this,
reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
StaticVisitor::VisitPointer(
- heap,
+ heap, this,
reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
StaticVisitor::VisitNextCodeLink(
heap, reinterpret_cast<Object**>(this->address() + kNextCodeLinkOffset));
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 49ce4f97ab..d29b99eb05 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -2,14 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/heap/objects-visiting.h"
+#include "src/heap/mark-compact-inl.h"
+#include "src/heap/objects-visiting-inl.h"
+
namespace v8 {
namespace internal {
+StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(Map* map) {
+ return GetVisitorId(map->instance_type(), map->instance_size(),
+ FLAG_unbox_double_fields && !map->HasFastPointerLayout());
+}
+
+
StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
int instance_type, int instance_size, bool has_unboxed_fields) {
if (instance_type < FIRST_NONSTRING_TYPE) {
@@ -42,6 +49,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case BYTE_ARRAY_TYPE:
return kVisitByteArray;
+ case BYTECODE_ARRAY_TYPE:
+ return kVisitBytecodeArray;
+
case FREE_SPACE_TYPE:
return kVisitFreeSpace;
@@ -135,14 +145,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
- case FLOAT32X4_TYPE:
-#define EXTERNAL_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ARRAY_TYPE:
-
- TYPED_ARRAYS(EXTERNAL_ARRAY_CASE)
+ case SIMD128_VALUE_TYPE:
return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
instance_size, has_unboxed_fields);
-#undef EXTERNAL_ARRAY_CASE
case FIXED_UINT8_ARRAY_TYPE:
case FIXED_INT8_ARRAY_TYPE:
@@ -174,6 +179,135 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
}
+void HeapObject::IterateBody(InstanceType type, int object_size,
+ ObjectVisitor* v) {
+ // Avoiding <Type>::cast(this) because it accesses the map pointer field.
+ // During GC, the map pointer field is encoded.
+ if (type < FIRST_NONSTRING_TYPE) {
+ switch (type & kStringRepresentationMask) {
+ case kSeqStringTag:
+ break;
+ case kConsStringTag:
+ ConsString::BodyDescriptor::IterateBody(this, v);
+ break;
+ case kSlicedStringTag:
+ SlicedString::BodyDescriptor::IterateBody(this, v);
+ break;
+ case kExternalStringTag:
+ if ((type & kStringEncodingMask) == kOneByteStringTag) {
+ reinterpret_cast<ExternalOneByteString*>(this)
+ ->ExternalOneByteStringIterateBody(v);
+ } else {
+ reinterpret_cast<ExternalTwoByteString*>(this)
+ ->ExternalTwoByteStringIterateBody(v);
+ }
+ break;
+ }
+ return;
+ }
+
+ switch (type) {
+ case FIXED_ARRAY_TYPE:
+ FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
+ break;
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ break;
+ case JS_OBJECT_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_TYPE:
+ case JS_VALUE_TYPE:
+ case JS_DATE_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_ARRAY_BUFFER_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
+ case JS_SET_ITERATOR_TYPE:
+ case JS_MAP_ITERATOR_TYPE:
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ case JS_REGEXP_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_BUILTINS_OBJECT_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
+ JSObject::BodyDescriptor::IterateBody(this, object_size, v);
+ break;
+ case JS_FUNCTION_TYPE:
+ reinterpret_cast<JSFunction*>(this)
+ ->JSFunctionIterateBody(object_size, v);
+ break;
+ case ODDBALL_TYPE:
+ Oddball::BodyDescriptor::IterateBody(this, v);
+ break;
+ case JS_PROXY_TYPE:
+ JSProxy::BodyDescriptor::IterateBody(this, v);
+ break;
+ case JS_FUNCTION_PROXY_TYPE:
+ JSFunctionProxy::BodyDescriptor::IterateBody(this, v);
+ break;
+ case FOREIGN_TYPE:
+ reinterpret_cast<Foreign*>(this)->ForeignIterateBody(v);
+ break;
+ case MAP_TYPE:
+ Map::BodyDescriptor::IterateBody(this, v);
+ break;
+ case CODE_TYPE:
+ reinterpret_cast<Code*>(this)->CodeIterateBody(v);
+ break;
+ case CELL_TYPE:
+ Cell::BodyDescriptor::IterateBody(this, v);
+ break;
+ case PROPERTY_CELL_TYPE:
+ PropertyCell::BodyDescriptor::IterateBody(this, v);
+ break;
+ case WEAK_CELL_TYPE:
+ WeakCell::BodyDescriptor::IterateBody(this, v);
+ break;
+ case SYMBOL_TYPE:
+ Symbol::BodyDescriptor::IterateBody(this, v);
+ break;
+
+ case HEAP_NUMBER_TYPE:
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ case SIMD128_VALUE_TYPE:
+ case FILLER_TYPE:
+ case BYTE_ARRAY_TYPE:
+ case BYTECODE_ARRAY_TYPE:
+ case FREE_SPACE_TYPE:
+ break;
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ reinterpret_cast<FixedTypedArrayBase*>(this) \
+ ->FixedTypedArrayBaseIterateBody(v); \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ case SHARED_FUNCTION_INFO_TYPE: {
+ SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
+ break;
+ }
+
+#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ if (type == ALLOCATION_SITE_TYPE) {
+ AllocationSite::BodyDescriptor::IterateBody(this, v);
+ } else {
+ StructBodyDescriptor::IterateBody(this, object_size, v);
+ }
+ break;
+ default:
+ PrintF("Unknown type: %d\n", type);
+ UNREACHABLE();
+ }
+}
+
+
// We don't record weak slots during marking or scavenges. Instead we do it
// once when we complete mark-compact cycle. Note that write barrier has no
// effect if we are already in the middle of compacting mark-sweep cycle and we
@@ -212,7 +346,7 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
if (record_slots) {
Object** next_slot =
HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
- collector->RecordSlot(next_slot, next_slot, retained);
+ collector->RecordSlot(tail, next_slot, retained);
}
}
// Retained object is new tail.
@@ -323,8 +457,7 @@ struct WeakListVisitor<Context> {
// Record the updated slot if necessary.
Object** head_slot =
HeapObject::RawField(context, FixedArray::SizeFor(index));
- heap->mark_compact_collector()->RecordSlot(head_slot, head_slot,
- list_head);
+ heap->mark_compact_collector()->RecordSlot(context, head_slot, list_head);
}
}
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 1b788e893b..5b150cf199 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_VISITING_H_
#include "src/allocation.h"
+#include "src/heap/spaces.h"
#include "src/layout-descriptor.h"
// This file provides base classes and auxiliary methods for defining
@@ -29,6 +30,7 @@ class StaticVisitorBase : public AllStatic {
V(SeqTwoByteString) \
V(ShortcutCandidate) \
V(ByteArray) \
+ V(BytecodeArray) \
V(FreeSpace) \
V(FixedArray) \
V(FixedDoubleArray) \
@@ -109,11 +111,7 @@ class StaticVisitorBase : public AllStatic {
bool has_unboxed_fields);
// Determine which specialized visitor should be used for given map.
- static VisitorId GetVisitorId(Map* map) {
- return GetVisitorId(
- map->instance_type(), map->instance_size(),
- FLAG_unbox_double_fields && !map->HasFastPointerLayout());
- }
+ static VisitorId GetVisitorId(Map* map);
// For visitors that allow specialization by size calculate VisitorId based
// on size, base visitor id and generic visitor id.
@@ -150,14 +148,12 @@ class VisitorDispatchTable {
}
}
+ inline Callback GetVisitor(Map* map);
+
inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) {
return reinterpret_cast<Callback>(callbacks_[id]);
}
- inline Callback GetVisitor(Map* map) {
- return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
- }
-
void Register(StaticVisitorBase::VisitorId id, Callback callback) {
DCHECK(id < StaticVisitorBase::kVisitorIdCount); // id is unsigned.
callbacks_[id] = reinterpret_cast<base::AtomicWord>(callback);
@@ -214,7 +210,7 @@ class BodyVisitorBase : public AllStatic {
private:
INLINE(static void IterateRawPointers(Heap* heap, HeapObject* object,
int start_offset, int end_offset)) {
- StaticVisitor::VisitPointers(heap,
+ StaticVisitor::VisitPointers(heap, object,
HeapObject::RawField(object, start_offset),
HeapObject::RawField(object, end_offset));
}
@@ -296,22 +292,23 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return table_.GetVisitor(map)(map, obj);
}
- INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+ INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
+ Object** start, Object** end)) {
for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
}
private:
INLINE(static int VisitJSFunction(Map* map, HeapObject* object)) {
Heap* heap = map->GetHeap();
- VisitPointers(heap,
+ VisitPointers(heap, object,
HeapObject::RawField(object, JSFunction::kPropertiesOffset),
HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
// Don't visit code entry. We are using this visitor only during scavenges.
VisitPointers(
- heap, HeapObject::RawField(object,
- JSFunction::kCodeEntryOffset + kPointerSize),
+ heap, object, HeapObject::RawField(
+ object, JSFunction::kCodeEntryOffset + kPointerSize),
HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset));
return JSFunction::kSize;
}
@@ -320,6 +317,10 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
}
+ INLINE(static int VisitBytecodeArray(Map* map, HeapObject* object)) {
+ return reinterpret_cast<BytecodeArray*>(object)->BytecodeArraySize();
+ }
+
INLINE(static int VisitFixedDoubleArray(Map* map, HeapObject* object)) {
int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
return FixedDoubleArray::SizeFor(length);
@@ -405,7 +406,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
INLINE(static void VisitWeakCell(Map* map, HeapObject* object));
- INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
+ INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
+ Address entry_address));
INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo));
@@ -417,7 +419,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Skip the weak next code link in a code object.
INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) {}
- // TODO(mstarzinger): This should be made protected once refactoring is done.
// Mark non-optimize code for functions inlined into the given optimized
// code. This will prevent it from being flushed.
static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
@@ -440,6 +441,10 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static void MarkMapContents(Heap* heap, Map* map);
static void MarkTransitionArray(Heap* heap, TransitionArray* transitions);
+ // Mark pointers in the optimized code map that should act as strong
+ // references, possibly treating some entries weak.
+ static void MarkOptimizedCodeMap(Heap* heap, FixedArray* code_map);
+
// Code flushing support.
INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info));
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index c2c4d12697..762558e11b 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -28,7 +28,6 @@ void Bitmap::Clear(MemoryChunk* chunk) {
// -----------------------------------------------------------------------------
// PageIterator
-
PageIterator::PageIterator(PagedSpace* space)
: space_(space),
prev_page_(&space->anchor_),
@@ -47,9 +46,32 @@ Page* PageIterator::next() {
// -----------------------------------------------------------------------------
-// NewSpacePageIterator
+// SemiSpaceIterator
+
+HeapObject* SemiSpaceIterator::Next() {
+ if (current_ == limit_) return NULL;
+ if (NewSpacePage::IsAtEnd(current_)) {
+ NewSpacePage* page = NewSpacePage::FromLimit(current_);
+ page = page->next_page();
+ DCHECK(!page->is_anchor());
+ current_ = page->area_start();
+ if (current_ == limit_) return NULL;
+ }
+
+ HeapObject* object = HeapObject::FromAddress(current_);
+ int size = object->Size();
+
+ current_ += size;
+ return object;
+}
+HeapObject* SemiSpaceIterator::next_object() { return Next(); }
+
+
+// -----------------------------------------------------------------------------
+// NewSpacePageIterator
+
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
: prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
@@ -81,6 +103,19 @@ NewSpacePage* NewSpacePageIterator::next() {
// -----------------------------------------------------------------------------
// HeapObjectIterator
+
+HeapObject* HeapObjectIterator::Next() {
+ do {
+ HeapObject* next_obj = FromCurrentPage();
+ if (next_obj != NULL) return next_obj;
+ } while (AdvanceToNextPage());
+ return NULL;
+}
+
+
+HeapObject* HeapObjectIterator::next_object() { return Next(); }
+
+
HeapObject* HeapObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
@@ -88,7 +123,7 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
continue;
}
HeapObject* obj = HeapObject::FromAddress(cur_addr_);
- int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+ int obj_size = obj->Size();
cur_addr_ += obj_size;
DCHECK(cur_addr_ <= cur_end_);
// TODO(hpayer): Remove the debugging code.
@@ -138,10 +173,21 @@ void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
// --------------------------------------------------------------------------
+// AllocationResult
+
+AllocationSpace AllocationResult::RetrySpace() {
+ DCHECK(IsRetry());
+ return static_cast<AllocationSpace>(Smi::cast(object_)->value());
+}
+
+
+// --------------------------------------------------------------------------
// PagedSpace
+
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
+ page->mutex_ = new base::Mutex();
DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
DCHECK(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
@@ -160,6 +206,9 @@ bool PagedSpace::Contains(Address addr) {
}
+bool PagedSpace::Contains(HeapObject* o) { return Contains(o->address()); }
+
+
void MemoryChunk::set_scan_on_scavenge(bool scan) {
if (scan) {
if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
@@ -192,19 +241,6 @@ MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
}
-void MemoryChunk::UpdateHighWaterMark(Address mark) {
- if (mark == NULL) return;
- // Need to subtract one from the mark because when a chunk is full the
- // top points to the next address after the chunk, which effectively belongs
- // to another chunk. See the comment to Page::FromAllocationTop.
- MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
- int new_mark = static_cast<int>(mark - chunk->address());
- if (new_mark > chunk->high_water_mark_) {
- chunk->high_water_mark_ = new_mark;
- }
-}
-
-
PointerChunkIterator::PointerChunkIterator(Heap* heap)
: state_(kOldSpaceState),
old_iterator_(heap->old_space()),
@@ -212,15 +248,43 @@ PointerChunkIterator::PointerChunkIterator(Heap* heap)
lo_iterator_(heap->lo_space()) {}
-Page* Page::next_page() {
- DCHECK(next_chunk()->owner() == owner());
- return static_cast<Page*>(next_chunk());
-}
-
-
-Page* Page::prev_page() {
- DCHECK(prev_chunk()->owner() == owner());
- return static_cast<Page*>(prev_chunk());
+MemoryChunk* PointerChunkIterator::next() {
+ switch (state_) {
+ case kOldSpaceState: {
+ if (old_iterator_.has_next()) {
+ return old_iterator_.next();
+ }
+ state_ = kMapState;
+ // Fall through.
+ }
+ case kMapState: {
+ if (map_iterator_.has_next()) {
+ return map_iterator_.next();
+ }
+ state_ = kLargeObjectState;
+ // Fall through.
+ }
+ case kLargeObjectState: {
+ HeapObject* heap_object;
+ do {
+ heap_object = lo_iterator_.Next();
+ if (heap_object == NULL) {
+ state_ = kFinishedState;
+ return NULL;
+ }
+ // Fixed arrays are the only pointer-containing objects in large
+ // object space.
+ } while (!heap_object->IsFixedArray());
+ MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
+ return answer;
+ }
+ case kFinishedState:
+ return NULL;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return NULL;
}
@@ -344,23 +408,29 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
- Address old_top = allocation_info_.top();
- int filler_size = Heap::GetFillToAlign(old_top, alignment);
+ Address top = allocation_info_.top();
+ int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
- if (allocation_info_.limit() - old_top < aligned_size_in_bytes) {
- return SlowAllocateRaw(size_in_bytes, alignment);
+ if (allocation_info_.limit() - top < aligned_size_in_bytes) {
+ // See if we can create room.
+ if (!EnsureAllocation(size_in_bytes, alignment)) {
+ return AllocationResult::Retry();
+ }
+
+ top = allocation_info_.top();
+ filler_size = Heap::GetFillToAlign(top, alignment);
+ aligned_size_in_bytes = size_in_bytes + filler_size;
}
- HeapObject* obj = HeapObject::FromAddress(old_top);
- allocation_info_.set_top(allocation_info_.top() + aligned_size_in_bytes);
+ HeapObject* obj = HeapObject::FromAddress(top);
+ allocation_info_.set_top(top + aligned_size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
if (filler_size > 0) {
obj = heap()->PrecedeWithFiller(obj, filler_size);
}
- // The slow path above ultimately goes through AllocateRaw, so this suffices.
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
return obj;
@@ -368,17 +438,20 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
- Address old_top = allocation_info_.top();
+ Address top = allocation_info_.top();
+ if (allocation_info_.limit() - top < size_in_bytes) {
+ // See if we can create room.
+ if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
+ return AllocationResult::Retry();
+ }
- if (allocation_info_.limit() - old_top < size_in_bytes) {
- return SlowAllocateRaw(size_in_bytes, kWordAligned);
+ top = allocation_info_.top();
}
- HeapObject* obj = HeapObject::FromAddress(old_top);
- allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
+ HeapObject* obj = HeapObject::FromAddress(top);
+ allocation_info_.set_top(top + size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- // The slow path above ultimately goes through AllocateRaw, so this suffices.
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
return obj;
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 0806b2565d..e66fd3944c 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/heap/spaces.h"
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/heap/mark-compact.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
@@ -24,40 +24,27 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
// just an anchor for the double linked page list. Initialize as if we have
// reached the end of the anchor page, then the first iteration will move on
// to the first page.
- Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
+ Initialize(space, NULL, NULL, kAllPagesInSpace);
}
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
- HeapObjectCallback size_func) {
- // You can't actually iterate over the anchor page. It is not a real page,
- // just an anchor for the double linked page list. Initialize the current
- // address and end as NULL, then the first iteration will move on
- // to the first page.
- Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(Page* page,
- HeapObjectCallback size_func) {
+HeapObjectIterator::HeapObjectIterator(Page* page) {
Space* owner = page->owner();
DCHECK(owner == page->heap()->old_space() ||
owner == page->heap()->map_space() ||
owner == page->heap()->code_space());
Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
- page->area_end(), kOnePageOnly, size_func);
+ page->area_end(), kOnePageOnly);
DCHECK(page->WasSwept() || page->SweepingCompleted());
}
void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
- HeapObjectIterator::PageMode mode,
- HeapObjectCallback size_f) {
+ HeapObjectIterator::PageMode mode) {
space_ = space;
cur_addr_ = cur;
cur_end_ = end;
page_mode_ = mode;
- size_func_ = size_f;
}
@@ -75,6 +62,8 @@ bool HeapObjectIterator::AdvanceToNextPage() {
}
cur_page = cur_page->next_page();
if (cur_page == space_->anchor()) return false;
+ cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
+ cur_page);
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
@@ -499,6 +488,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base);
chunk->set_parallel_sweeping(SWEEPING_DONE);
+ chunk->mutex_ = NULL;
chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0;
chunk->available_in_large_free_list_ = 0;
@@ -765,6 +755,7 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
delete chunk->slots_buffer();
delete chunk->skip_list();
+ delete chunk->mutex();
base::VirtualMemory* reservation = chunk->reserved_memory();
if (reservation->IsReserved()) {
@@ -918,8 +909,8 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
// -----------------------------------------------------------------------------
// MemoryChunk implementation
-void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
}
@@ -940,7 +931,7 @@ STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
ObjectSpace::kObjectSpaceMapSpace);
-PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace space,
+PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
: Space(heap, space, executable),
free_list_(this),
@@ -948,8 +939,6 @@ PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace space,
end_of_unswept_pages_(NULL),
emergency_memory_(NULL) {
area_size_ = MemoryAllocator::PageAreaSize(space);
- max_capacity_ =
- (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
accounting_stats_.Clear();
allocation_info_.set_top(NULL);
@@ -1005,7 +994,7 @@ Object* PagedSpace::FindObject(Address addr) {
if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found.
Page* p = Page::FromAddress(addr);
- HeapObjectIterator it(p, NULL);
+ HeapObjectIterator it(p);
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
Address cur = obj->address();
Address next = cur + obj->Size();
@@ -1018,7 +1007,6 @@ Object* PagedSpace::FindObject(Address addr) {
bool PagedSpace::CanExpand() {
- DCHECK(max_capacity_ % AreaSize() == 0);
DCHECK(heap()->mark_compact_collector()->is_compacting() ||
Capacity() <= heap()->MaxOldGenerationSize());
DCHECK(heap()->CommittedOldGenerationMemory() <=
@@ -1182,7 +1170,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
allocation_pointer_found_in_space = true;
}
CHECK(page->WasSwept());
- HeapObjectIterator it(page, NULL);
+ HeapObjectIterator it(page);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
int black_size = 0;
@@ -1465,14 +1453,34 @@ bool NewSpace::AddFreshPage() {
}
-AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
- AllocationAlignment alignment) {
+bool NewSpace::EnsureAllocation(int size_in_bytes,
+ AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
- if (allocation_info_.limit() < high) {
- int alignment_size = Heap::GetFillToAlign(old_top, alignment);
- int aligned_size_in_bytes = size_in_bytes + alignment_size;
+ int filler_size = Heap::GetFillToAlign(old_top, alignment);
+ int aligned_size_in_bytes = size_in_bytes + filler_size;
+ if (old_top + aligned_size_in_bytes >= high) {
+ // Not enough room in the page, try to allocate a new one.
+ if (!AddFreshPage()) {
+ return false;
+ }
+
+ // Do a step for the bytes allocated on the last page.
+ int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
+ heap()->incremental_marking()->Step(bytes_allocated,
+ IncrementalMarking::GC_VIA_STACK_GUARD);
+ old_top = allocation_info_.top();
+ top_on_previous_step_ = old_top;
+
+ high = to_space_.page_high();
+ filler_size = Heap::GetFillToAlign(old_top, alignment);
+ aligned_size_in_bytes = size_in_bytes + filler_size;
+ }
+
+ DCHECK(old_top + aligned_size_in_bytes < high);
+
+ if (allocation_info_.limit() < high) {
// Either the limit has been lowered because linear allocation was disabled
// or because incremental marking wants to get a chance to do a step. Set
// the new limit accordingly.
@@ -1482,19 +1490,8 @@ AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
IncrementalMarking::GC_VIA_STACK_GUARD);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
top_on_previous_step_ = new_top;
- if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
- return AllocateRawAligned(size_in_bytes, alignment);
- } else if (AddFreshPage()) {
- // Switched to new page. Try allocating again.
- int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated,
- IncrementalMarking::GC_VIA_STACK_GUARD);
- top_on_previous_step_ = to_space_.page_low();
- if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
- return AllocateRawAligned(size_in_bytes, alignment);
- } else {
- return AllocationResult::Retry();
}
+ return true;
}
@@ -1846,33 +1843,16 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
- Initialize(space->bottom(), space->top(), NULL);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
- HeapObjectCallback size_func) {
- Initialize(space->bottom(), space->top(), size_func);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
- Initialize(start, space->top(), NULL);
-}
-
-SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
- Initialize(from, to, NULL);
+SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
+ Initialize(space->bottom(), space->top());
}
-void SemiSpaceIterator::Initialize(Address start, Address end,
- HeapObjectCallback size_func) {
+void SemiSpaceIterator::Initialize(Address start, Address end) {
SemiSpace::AssertValidRange(start, end);
current_ = start;
limit_ = end;
- size_func_ = size_func;
}
@@ -2415,7 +2395,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
} else if (bytes_left > kThreshold &&
owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
- FLAG_incremental_marking_steps) {
+ FLAG_incremental_marking) {
int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
@@ -2808,14 +2788,6 @@ void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
current_ = space->first_page_;
- size_func_ = NULL;
-}
-
-
-LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
- HeapObjectCallback size_func) {
- current_ = space->first_page_;
- size_func_ = size_func;
}
@@ -2833,10 +2805,8 @@ HeapObject* LargeObjectIterator::Next() {
static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
-LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
- AllocationSpace id)
+LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
: Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
- max_capacity_(max_capacity),
first_page_(NULL),
size_(0),
page_count_(0),
@@ -2844,6 +2814,9 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
chunk_map_(ComparePointers, 1024) {}
+LargeObjectSpace::~LargeObjectSpace() {}
+
+
bool LargeObjectSpace::SetUp() {
first_page_ = NULL;
size_ = 0;
@@ -2875,12 +2848,10 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->always_allocate() &&
- heap()->OldGenerationAllocationLimitReached()) {
+ !heap()->CanExpandOldGeneration(object_size)) {
return AllocationResult::Retry(identity());
}
- if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());
-
LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
if (page == NULL) return AllocationResult::Retry(identity());
@@ -3046,6 +3017,11 @@ void LargeObjectSpace::Verify() {
CHECK(map->IsMap());
CHECK(heap()->map_space()->Contains(map));
+ // Double unboxing in LO space is not allowed. This would break the
+ // lookup mechanism for store and slot buffer entries which use the
+ // page header tag.
+ CHECK(object->ContentType() != HeapObjectContents::kMixedValues);
+
// We have only code, sequential strings, external strings
// (sequential strings that have been morphed into external
// strings), fixed arrays, byte arrays, and constant pool arrays in the
@@ -3122,14 +3098,14 @@ void Page::Print() {
PrintF("Page@%p in %s\n", this->address(),
AllocationSpaceName(this->owner()->identity()));
printf(" --------------------------------------\n");
- HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
+ HeapObjectIterator objects(this);
unsigned mark_size = 0;
for (HeapObject* object = objects.Next(); object != NULL;
object = objects.Next()) {
bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
if (is_marked) {
- mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
+ mark_size += object->Size();
}
object->ShortPrint();
PrintF("\n");
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 3461de3ef0..2ea2e909aa 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -9,9 +9,10 @@
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
+#include "src/flags.h"
#include "src/hashmap.h"
#include "src/list.h"
-#include "src/log.h"
+#include "src/objects.h"
#include "src/utils.h"
namespace v8 {
@@ -452,10 +453,17 @@ class MemoryChunk {
base::Release_Store(&parallel_sweeping_, state);
}
- bool TryParallelSweeping() {
- return base::Acquire_CompareAndSwap(&parallel_sweeping_, SWEEPING_PENDING,
- SWEEPING_IN_PROGRESS) ==
- SWEEPING_PENDING;
+ bool TryLock() { return mutex_->TryLock(); }
+
+ base::Mutex* mutex() { return mutex_; }
+
+ // WaitUntilSweepingCompleted only works when concurrent sweeping is in
+ // progress. In particular, when we know that right before this call a
+ // sweeper thread was sweeping this page.
+ void WaitUntilSweepingCompleted() {
+ mutex_->Lock();
+ mutex_->Unlock();
+ DCHECK(SweepingCompleted());
}
bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }
@@ -515,11 +523,11 @@ class MemoryChunk {
progress_bar();
}
- static void IncrementLiveBytesFromGC(Address address, int by) {
- MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
+ static void IncrementLiveBytesFromGC(HeapObject* object, int by) {
+ MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}
- static void IncrementLiveBytesFromMutator(Address address, int by);
+ static void IncrementLiveBytesFromMutator(HeapObject* object, int by);
static const intptr_t kAlignment =
(static_cast<uintptr_t>(1) << kPageSizeBits);
@@ -537,9 +545,15 @@ class MemoryChunk {
static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize + kPointerSize;
- static const size_t kHeaderSize =
- kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize +
- kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize;
+ static const size_t kHeaderSize = kWriteBarrierCounterOffset +
+ kPointerSize + // write_barrier_counter_
+ kIntSize + // progress_bar_
+ kIntSize + // high_water_mark_
+ kPointerSize + // mutex_ page lock
+ kPointerSize + // parallel_sweeping_
+ 5 * kPointerSize + // free list statistics
+ kPointerSize + // next_chunk_
+ kPointerSize; // prev_chunk_
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -643,7 +657,17 @@ class MemoryChunk {
// Approximate amount of physical memory committed for this chunk.
size_t CommittedPhysicalMemory() { return high_water_mark_; }
- static inline void UpdateHighWaterMark(Address mark);
+ static inline void UpdateHighWaterMark(Address mark) {
+ if (mark == NULL) return;
+ // Need to subtract one from the mark because when a chunk is full the
+ // top points to the next address after the chunk, which effectively belongs
+ // to another chunk. See the comment to Page::FromAllocationTop.
+ MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
+ int new_mark = static_cast<int>(mark - chunk->address());
+ if (new_mark > chunk->high_water_mark_) {
+ chunk->high_water_mark_ = new_mark;
+ }
+ }
protected:
size_t size_;
@@ -675,6 +699,7 @@ class MemoryChunk {
// count highest number of bytes ever allocated on the page.
int high_water_mark_;
+ base::Mutex* mutex_;
base::AtomicWord parallel_sweeping_;
// PagedSpace free-list statistics.
@@ -727,8 +752,14 @@ class Page : public MemoryChunk {
}
// Returns the next page in the chain of pages owned by a space.
- inline Page* next_page();
- inline Page* prev_page();
+ inline Page* next_page() {
+ DCHECK(next_chunk()->owner() == owner());
+ return static_cast<Page*>(next_chunk());
+ }
+ inline Page* prev_page() {
+ DCHECK(prev_chunk()->owner() == owner());
+ return static_cast<Page*>(prev_chunk());
+ }
inline void set_next_page(Page* page);
inline void set_prev_page(Page* page);
@@ -1226,31 +1257,20 @@ class ObjectIterator : public Malloced {
class HeapObjectIterator : public ObjectIterator {
public:
// Creates a new object iterator in a given space.
- // If the size function is not given, the iterator calls the default
- // Object::Size().
explicit HeapObjectIterator(PagedSpace* space);
- HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
- HeapObjectIterator(Page* page, HeapObjectCallback size_func);
+ explicit HeapObjectIterator(Page* page);
// Advance to the next object, skipping free spaces and other fillers and
// skipping the special garbage section of which there is one per space.
// Returns NULL when the iteration has ended.
- inline HeapObject* Next() {
- do {
- HeapObject* next_obj = FromCurrentPage();
- if (next_obj != NULL) return next_obj;
- } while (AdvanceToNextPage());
- return NULL;
- }
-
- virtual HeapObject* next_object() { return Next(); }
+ inline HeapObject* Next();
+ virtual inline HeapObject* next_object();
private:
enum PageMode { kOnePageOnly, kAllPagesInSpace };
Address cur_addr_; // Current iteration point.
Address cur_end_; // End iteration point.
- HeapObjectCallback size_func_; // Size function or NULL.
PagedSpace* space_;
PageMode page_mode_;
@@ -1263,7 +1283,7 @@ class HeapObjectIterator : public ObjectIterator {
// Initializes fields.
inline void Initialize(PagedSpace* owner, Address start, Address end,
- PageMode mode, HeapObjectCallback size_func);
+ PageMode mode);
};
@@ -1635,10 +1655,7 @@ class AllocationResult {
return object_;
}
- AllocationSpace RetrySpace() {
- DCHECK(IsRetry());
- return static_cast<AllocationSpace>(Smi::cast(object_)->value());
- }
+ inline AllocationSpace RetrySpace();
private:
explicit AllocationResult(AllocationSpace space)
@@ -1653,9 +1670,8 @@ STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
class PagedSpace : public Space {
public:
- // Creates a space with a maximum capacity, and an id.
- PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
- Executability executable);
+ // Creates a space with an id.
+ PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
virtual ~PagedSpace() {}
@@ -1675,7 +1691,7 @@ class PagedSpace : public Space {
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
- bool Contains(HeapObject* o) { return Contains(o->address()); }
+ inline bool Contains(HeapObject* o);
// Unlike Contains() methods it is safe to call this one even for addresses
// of unmapped memory.
bool ContainsSafe(Address addr);
@@ -1892,9 +1908,6 @@ class PagedSpace : public Space {
int area_size_;
- // Maximum capacity of this space.
- intptr_t max_capacity_;
-
// Accounting information for this space.
AllocationStats accounting_stats_;
@@ -2264,49 +2277,21 @@ class SemiSpace : public Space {
// iterator is created are not iterated.
class SemiSpaceIterator : public ObjectIterator {
public:
- // Create an iterator over the objects in the given space. If no start
- // address is given, the iterator starts from the bottom of the space. If
- // no size function is given, the iterator calls Object::Size().
-
- // Iterate over all of allocated to-space.
+ // Create an iterator over the allocated objects in the given to-space.
explicit SemiSpaceIterator(NewSpace* space);
- // Iterate over all of allocated to-space, with a custome size function.
- SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
- // Iterate over part of allocated to-space, from start to the end
- // of allocation.
- SemiSpaceIterator(NewSpace* space, Address start);
- // Iterate from one address to another in the same semi-space.
- SemiSpaceIterator(Address from, Address to);
-
- HeapObject* Next() {
- if (current_ == limit_) return NULL;
- if (NewSpacePage::IsAtEnd(current_)) {
- NewSpacePage* page = NewSpacePage::FromLimit(current_);
- page = page->next_page();
- DCHECK(!page->is_anchor());
- current_ = page->area_start();
- if (current_ == limit_) return NULL;
- }
- HeapObject* object = HeapObject::FromAddress(current_);
- int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
-
- current_ += size;
- return object;
- }
+ inline HeapObject* Next();
// Implementation of the ObjectIterator functions.
- virtual HeapObject* next_object() { return Next(); }
+ virtual inline HeapObject* next_object();
private:
- void Initialize(Address start, Address end, HeapObjectCallback size_func);
+ void Initialize(Address start, Address end);
// The current iteration point.
Address current_;
// The end of iteration.
Address limit_;
- // The callback function.
- HeapObjectCallback size_func_;
};
@@ -2644,8 +2629,7 @@ class NewSpace : public Space {
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
- MUST_USE_RESULT AllocationResult
- SlowAllocateRaw(int size_in_bytes, AllocationAlignment alignment);
+ bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
friend class SemiSpaceIterator;
};
@@ -2656,11 +2640,10 @@ class NewSpace : public Space {
class OldSpace : public PagedSpace {
public:
- // Creates an old space object with a given maximum capacity.
- // The constructor does not allocate pages from OS.
- OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
- Executability executable)
- : PagedSpace(heap, max_capacity, id, executable) {}
+ // Creates an old space object. The constructor does not allocate pages
+ // from OS.
+ OldSpace(Heap* heap, AllocationSpace id, Executability executable)
+ : PagedSpace(heap, id, executable) {}
};
@@ -2677,9 +2660,9 @@ class OldSpace : public PagedSpace {
class MapSpace : public PagedSpace {
public:
- // Creates a map space object with a maximum capacity.
- MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
+ // Creates a map space object.
+ MapSpace(Heap* heap, AllocationSpace id)
+ : PagedSpace(heap, id, NOT_EXECUTABLE),
max_map_space_pages_(kMaxMapPageIndex - 1) {}
// Given an index, returns the page address.
@@ -2718,8 +2701,8 @@ class MapSpace : public PagedSpace {
class LargeObjectSpace : public Space {
public:
- LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
- virtual ~LargeObjectSpace() {}
+ LargeObjectSpace(Heap* heap, AllocationSpace id);
+ virtual ~LargeObjectSpace();
// Initializes internal data structures.
bool SetUp();
@@ -2737,8 +2720,6 @@ class LargeObjectSpace : public Space {
MUST_USE_RESULT AllocationResult
AllocateRaw(int object_size, Executability executable);
- bool CanAllocateSize(int size) { return Size() + size <= max_capacity_; }
-
// Available bytes for objects in this space.
inline intptr_t Available() override;
@@ -2788,7 +2769,6 @@ class LargeObjectSpace : public Space {
bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
private:
- intptr_t max_capacity_;
intptr_t maximum_committed_;
// The head of the linked list of large object chunks.
LargePage* first_page_;
@@ -2805,7 +2785,6 @@ class LargeObjectSpace : public Space {
class LargeObjectIterator : public ObjectIterator {
public:
explicit LargeObjectIterator(LargeObjectSpace* space);
- LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
HeapObject* Next();
@@ -2814,7 +2793,6 @@ class LargeObjectIterator : public ObjectIterator {
private:
LargePage* current_;
- HeapObjectCallback size_func_;
};
@@ -2825,45 +2803,7 @@ class PointerChunkIterator BASE_EMBEDDED {
inline explicit PointerChunkIterator(Heap* heap);
// Return NULL when the iterator is done.
- MemoryChunk* next() {
- switch (state_) {
- case kOldSpaceState: {
- if (old_iterator_.has_next()) {
- return old_iterator_.next();
- }
- state_ = kMapState;
- // Fall through.
- }
- case kMapState: {
- if (map_iterator_.has_next()) {
- return map_iterator_.next();
- }
- state_ = kLargeObjectState;
- // Fall through.
- }
- case kLargeObjectState: {
- HeapObject* heap_object;
- do {
- heap_object = lo_iterator_.Next();
- if (heap_object == NULL) {
- state_ = kFinishedState;
- return NULL;
- }
- // Fixed arrays are the only pointer-containing objects in large
- // object space.
- } while (!heap_object->IsFixedArray());
- MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
- return answer;
- }
- case kFinishedState:
- return NULL;
- default:
- break;
- }
- UNREACHABLE();
- return NULL;
- }
-
+ inline MemoryChunk* next();
private:
enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
diff --git a/deps/v8/src/heap/store-buffer-inl.h b/deps/v8/src/heap/store-buffer-inl.h
index 3066375289..f168fe0a59 100644
--- a/deps/v8/src/heap/store-buffer-inl.h
+++ b/deps/v8/src/heap/store-buffer-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_STORE_BUFFER_INL_H_
#define V8_STORE_BUFFER_INL_H_
+#include "src/heap/heap.h"
#include "src/heap/store-buffer.h"
namespace v8 {
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index efdd0b4708..95fb83c3c8 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <algorithm>
+#include "src/heap/store-buffer.h"
-#include "src/v8.h"
+#include <algorithm>
#include "src/counters.h"
#include "src/heap/store-buffer-inl.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -92,9 +93,6 @@ void StoreBuffer::SetUp() {
hash_sets_are_empty_ = false;
ClearFilteringHashSets();
-
- heap_->isolate()->set_store_buffer_hash_set_1_address(hash_set_1_);
- heap_->isolate()->set_store_buffer_hash_set_2_address(hash_set_2_);
}
@@ -455,7 +453,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
if (owner == heap_->map_space()) {
DCHECK(page->WasSwept());
- HeapObjectIterator iterator(page, NULL);
+ HeapObjectIterator iterator(page);
for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
heap_object = iterator.Next()) {
// We skip free space objects.
@@ -468,18 +466,9 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
}
}
} else {
- if (!page->SweepingCompleted()) {
- heap_->mark_compact_collector()->SweepInParallel(page, owner);
- if (!page->SweepingCompleted()) {
- // We were not able to sweep that page, i.e., a concurrent
- // sweeper thread currently owns this page.
- // TODO(hpayer): This may introduce a huge pause here. We
- // just care about finish sweeping of the scan on scavenge page.
- heap_->mark_compact_collector()->EnsureSweepingCompleted();
- }
- }
- CHECK(page->owner() == heap_->old_space());
- HeapObjectIterator iterator(page, NULL);
+ heap_->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
+ page);
+ HeapObjectIterator iterator(page);
for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
heap_object = iterator.Next()) {
// We iterate over objects that contain new space pointers only.
@@ -503,17 +492,6 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
obj_address + FixedTypedArrayBase::kBasePointerOffset,
obj_address + FixedTypedArrayBase::kHeaderSize,
slot_callback);
- } else if (heap_object->IsJSArrayBuffer()) {
- FindPointersToNewSpaceInRegion(
- obj_address +
- JSArrayBuffer::BodyDescriptor::kStartOffset,
- obj_address + JSArrayBuffer::kByteLengthOffset +
- kPointerSize,
- slot_callback);
- FindPointersToNewSpaceInRegion(
- obj_address + JSArrayBuffer::kSize,
- obj_address + JSArrayBuffer::kSizeWithInternalFields,
- slot_callback);
} else if (FLAG_unbox_double_fields) {
LayoutDescriptorHelper helper(heap_object->map());
DCHECK(!helper.all_fields_tagged());
@@ -549,9 +527,6 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
void StoreBuffer::Compact() {
- CHECK(hash_set_1_ == heap_->isolate()->store_buffer_hash_set_1_address());
- CHECK(hash_set_2_ == heap_->isolate()->store_buffer_hash_set_2_address());
-
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
if (top == start_) return;
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 2843195e86..770d710a3a 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -820,6 +820,7 @@ bool HInstruction::CanDeoptimize() {
case HValue::kLeaveInlined:
case HValue::kLoadFieldByIndex:
case HValue::kLoadGlobalGeneric:
+ case HValue::kLoadGlobalViaContext:
case HValue::kLoadNamedField:
case HValue::kLoadNamedGeneric:
case HValue::kLoadRoot:
@@ -833,6 +834,7 @@ bool HInstruction::CanDeoptimize() {
case HValue::kSeqStringGetChar:
case HValue::kStoreCodeEntry:
case HValue::kStoreFrameContext:
+ case HValue::kStoreGlobalViaContext:
case HValue::kStoreKeyed:
case HValue::kStoreNamedField:
case HValue::kStoreNamedGeneric:
@@ -1158,7 +1160,8 @@ Representation HBranch::observed_input_representation(int index) {
if (expected_input_types_.Contains(ToBooleanStub::NULL_TYPE) ||
expected_input_types_.Contains(ToBooleanStub::SPEC_OBJECT) ||
expected_input_types_.Contains(ToBooleanStub::STRING) ||
- expected_input_types_.Contains(ToBooleanStub::SYMBOL)) {
+ expected_input_types_.Contains(ToBooleanStub::SYMBOL) ||
+ expected_input_types_.Contains(ToBooleanStub::SIMD_VALUE)) {
return Representation::Tagged();
}
if (expected_input_types_.Contains(ToBooleanStub::UNDEFINED)) {
@@ -1323,6 +1326,17 @@ static String* TypeOfString(HConstant* constant, Isolate* isolate) {
}
case SYMBOL_TYPE:
return heap->symbol_string();
+ case SIMD128_VALUE_TYPE: {
+ Unique<Map> map = constant->ObjectMap();
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ if (map.IsKnownGlobal(heap->type##_map())) { \
+ return heap->type##_string(); \
+ }
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ UNREACHABLE();
+ return nullptr;
+ }
case JS_FUNCTION_TYPE:
case JS_FUNCTION_PROXY_TYPE:
return heap->function_string();
@@ -1427,6 +1441,17 @@ HValue* HBitwise::Canonicalize() {
}
+// static
+HInstruction* HAdd::New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right, Strength strength,
+ ExternalAddType external_add_type) {
+ // For everything else, you should use the other factory method without
+ // ExternalAddType.
+ DCHECK_EQ(external_add_type, AddOfExternalAndTagged);
+ return new (zone) HAdd(context, left, right, strength, external_add_type);
+}
+
+
Representation HAdd::RepresentationFromInputs() {
Representation left_rep = left()->representation();
if (left_rep.IsExternal()) {
@@ -1440,7 +1465,11 @@ Representation HAdd::RequiredInputRepresentation(int index) {
if (index == 2) {
Representation left_rep = left()->representation();
if (left_rep.IsExternal()) {
- return Representation::Integer32();
+ if (external_add_type_ == AddOfExternalAndTagged) {
+ return Representation::Tagged();
+ } else {
+ return Representation::Integer32();
+ }
}
}
return HArithmeticBinaryOperation::RequiredInputRepresentation(index);
@@ -3187,18 +3216,13 @@ Range* HLoadNamedField::InferRange(Zone* zone) {
Range* HLoadKeyed::InferRange(Zone* zone) {
switch (elements_kind()) {
- case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
return new(zone) Range(kMinInt8, kMaxInt8);
- case EXTERNAL_UINT8_ELEMENTS:
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
return new(zone) Range(kMinUInt8, kMaxUInt8);
- case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
return new(zone) Range(kMinInt16, kMaxInt16);
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
return new(zone) Range(kMinUInt16, kMaxUInt16);
default:
@@ -3441,11 +3465,11 @@ std::ostream& HLoadNamedGeneric::PrintDataTo(
std::ostream& HLoadKeyed::PrintDataTo(std::ostream& os) const { // NOLINT
- if (!is_external()) {
+ if (!is_fixed_typed_array()) {
os << NameOf(elements());
} else {
- DCHECK(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
- elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ DCHECK(elements_kind() >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
+ elements_kind() <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
os << NameOf(elements()) << "." << ElementsKindToString(elements_kind());
}
@@ -3480,7 +3504,7 @@ bool HLoadKeyed::UsesMustHandleHole() const {
return false;
}
- if (IsExternalArrayElementsKind(elements_kind())) {
+ if (IsFixedTypedArrayElementsKind(elements_kind())) {
return false;
}
@@ -3520,7 +3544,7 @@ bool HLoadKeyed::RequiresHoleCheck() const {
return false;
}
- if (IsExternalArrayElementsKind(elements_kind())) {
+ if (IsFixedTypedArrayElementsKind(elements_kind())) {
return false;
}
@@ -3579,6 +3603,13 @@ std::ostream& HStoreNamedGeneric::PrintDataTo(
}
+std::ostream& HStoreGlobalViaContext::PrintDataTo(
+ std::ostream& os) const { // NOLINT
+ return os << " depth:" << depth() << " slot:" << slot_index() << " = "
+ << NameOf(value());
+}
+
+
std::ostream& HStoreNamedField::PrintDataTo(std::ostream& os) const { // NOLINT
os << NameOf(object()) << access_ << " = " << NameOf(value());
if (NeedsWriteBarrier()) os << " (write-barrier)";
@@ -3588,11 +3619,11 @@ std::ostream& HStoreNamedField::PrintDataTo(std::ostream& os) const { // NOLINT
std::ostream& HStoreKeyed::PrintDataTo(std::ostream& os) const { // NOLINT
- if (!is_external()) {
+ if (!is_fixed_typed_array()) {
os << NameOf(elements());
} else {
- DCHECK(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
- elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ DCHECK(elements_kind() >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
+ elements_kind() <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
os << NameOf(elements()) << "." << ElementsKindToString(elements_kind());
}
@@ -3629,6 +3660,12 @@ std::ostream& HLoadGlobalGeneric::PrintDataTo(
}
+std::ostream& HLoadGlobalViaContext::PrintDataTo(
+ std::ostream& os) const { // NOLINT
+ return os << "depth:" << depth() << " slot:" << slot_index();
+}
+
+
std::ostream& HInnerAllocatedObject::PrintDataTo(
std::ostream& os) const { // NOLINT
os << NameOf(base_object()) << " offset ";
@@ -3949,8 +3986,7 @@ bool HStoreKeyed::NeedsCanonicalization() {
switch (value()->opcode()) {
case kLoadKeyed: {
ElementsKind load_kind = HLoadKeyed::cast(value())->elements_kind();
- return IsExternalFloatOrDoubleElementsKind(load_kind) ||
- IsFixedFloatElementsKind(load_kind);
+ return IsFixedFloatElementsKind(load_kind);
}
case kChange: {
Representation from = HChange::cast(value())->from();
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 2cac0eb460..807a651029 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -8,8 +8,6 @@
#include <cstring>
#include <iosfwd>
-#include "src/v8.h"
-
#include "src/allocation.h"
#include "src/base/bits.h"
#include "src/bit-vector.h"
@@ -119,6 +117,7 @@ class LChunkBuilder;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
+ V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -147,6 +146,7 @@ class LChunkBuilder;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
+ V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -4828,11 +4828,21 @@ class HPower final : public HTemplateInstruction<2> {
};
+enum ExternalAddType {
+ AddOfExternalAndTagged,
+ AddOfExternalAndInt32,
+ NoExternalAdd
+};
+
+
class HAdd final : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
HValue* left, HValue* right,
Strength strength = Strength::WEAK);
+ static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+ HValue* left, HValue* right, Strength strength,
+ ExternalAddType external_add_type);
// Add is only commutative if two integer values are added and not if two
// tagged values are added (because it might be a String concatenation).
@@ -4875,6 +4885,16 @@ class HAdd final : public HArithmeticBinaryOperation {
Representation RequiredInputRepresentation(int index) override;
+ bool IsConsistentExternalRepresentation() {
+ return left()->representation().IsExternal() &&
+ ((external_add_type_ == AddOfExternalAndInt32 &&
+ right()->representation().IsInteger32()) ||
+ (external_add_type_ == AddOfExternalAndTagged &&
+ right()->representation().IsTagged()));
+ }
+
+ ExternalAddType external_add_type() const { return external_add_type_; }
+
DECLARE_CONCRETE_INSTRUCTION(Add)
protected:
@@ -4883,10 +4903,37 @@ class HAdd final : public HArithmeticBinaryOperation {
Range* InferRange(Zone* zone) override;
private:
- HAdd(HValue* context, HValue* left, HValue* right, Strength strength)
- : HArithmeticBinaryOperation(context, left, right, strength) {
+ HAdd(HValue* context, HValue* left, HValue* right, Strength strength,
+ ExternalAddType external_add_type = NoExternalAdd)
+ : HArithmeticBinaryOperation(context, left, right, strength),
+ external_add_type_(external_add_type) {
SetFlag(kCanOverflow);
+ switch (external_add_type_) {
+ case AddOfExternalAndTagged:
+ DCHECK(left->representation().IsExternal());
+ DCHECK(right->representation().IsTagged());
+ SetDependsOnFlag(kNewSpacePromotion);
+ ClearFlag(HValue::kCanOverflow);
+ SetFlag(kHasNoObservableSideEffects);
+ break;
+
+ case NoExternalAdd:
+ // This is a bit of a hack: The call to this constructor is generated
+ // by a macro that also supports sub and mul, so it doesn't pass in
+ // a value for external_add_type but uses the default.
+ if (left->representation().IsExternal()) {
+ external_add_type_ = AddOfExternalAndInt32;
+ }
+ break;
+
+ case AddOfExternalAndInt32:
+ // See comment above.
+ UNREACHABLE();
+ break;
+ }
}
+
+ ExternalAddType external_add_type_;
};
@@ -5401,12 +5448,12 @@ class HUnknownOSRValue final : public HTemplateInstruction<0> {
class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadGlobalGeneric, HValue*,
- Handle<String>, bool);
+ Handle<String>, TypeofMode);
HValue* context() { return OperandAt(0); }
HValue* global_object() { return OperandAt(1); }
Handle<String> name() const { return name_; }
- bool for_typeof() const { return for_typeof_; }
+ TypeofMode typeof_mode() const { return typeof_mode_; }
FeedbackVectorICSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
@@ -5428,9 +5475,9 @@ class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
private:
HLoadGlobalGeneric(HValue* context, HValue* global_object,
- Handle<String> name, bool for_typeof)
+ Handle<String> name, TypeofMode typeof_mode)
: name_(name),
- for_typeof_(for_typeof),
+ typeof_mode_(typeof_mode),
slot_(FeedbackVectorICSlot::Invalid()) {
SetOperandAt(0, context);
SetOperandAt(1, global_object);
@@ -5439,12 +5486,41 @@ class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
}
Handle<String> name_;
- bool for_typeof_;
+ TypeofMode typeof_mode_;
Handle<TypeFeedbackVector> feedback_vector_;
FeedbackVectorICSlot slot_;
};
+class HLoadGlobalViaContext final : public HTemplateInstruction<1> {
+ public:
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadGlobalViaContext, int, int);
+
+ HValue* context() { return OperandAt(0); }
+ int depth() const { return depth_; }
+ int slot_index() const { return slot_index_; }
+
+ std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
+
+ Representation RequiredInputRepresentation(int index) override {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext)
+
+ private:
+ HLoadGlobalViaContext(HValue* context, int depth, int slot_index)
+ : depth_(depth), slot_index_(slot_index) {
+ SetOperandAt(0, context);
+ set_representation(Representation::Tagged());
+ SetAllSideEffects();
+ }
+
+ int const depth_;
+ int const slot_index_;
+};
+
+
class HAllocate final : public HTemplateInstruction<2> {
public:
static bool CompatibleInstanceTypes(InstanceType type1,
@@ -5911,6 +5987,11 @@ class HObjectAccess final {
Representation::Integer32());
}
+ static HObjectAccess ForOddballTypeOf() {
+ return HObjectAccess(kInobject, Oddball::kTypeOfOffset,
+ Representation::HeapObject());
+ }
+
static HObjectAccess ForElementsPointer() {
return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
}
@@ -5950,6 +6031,12 @@ class HObjectAccess final {
Representation::Tagged());
}
+ static HObjectAccess ForFixedTypedArrayBaseExternalPointer() {
+ return HObjectAccess::ForObservableJSObjectOffset(
+ FixedTypedArrayBase::kExternalPointerOffset,
+ Representation::External());
+ }
+
static HObjectAccess ForStringHashField() {
return HObjectAccess(kInobject,
String::kHashFieldOffset,
@@ -6019,10 +6106,10 @@ class HObjectAccess final {
Representation::Integer32());
}
- static HObjectAccess ForMapInObjectProperties() {
- return HObjectAccess(kInobject,
- Map::kInObjectPropertiesOffset,
- Representation::UInteger8());
+ static HObjectAccess ForMapInObjectPropertiesOrConstructorFunctionIndex() {
+ return HObjectAccess(
+ kInobject, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
+ Representation::UInteger8());
}
static HObjectAccess ForMapInstanceType() {
@@ -6069,6 +6156,11 @@ class HObjectAccess final {
return HObjectAccess(kInobject, PropertyCell::kValueOffset);
}
+ static HObjectAccess ForPropertyCellDetails() {
+ return HObjectAccess(kInobject, PropertyCell::kDetailsOffset,
+ Representation::Smi());
+ }
+
static HObjectAccess ForCellValue() {
return HObjectAccess(kInobject, Cell::kValueOffset);
}
@@ -6155,11 +6247,6 @@ class HObjectAccess final {
JSArrayBuffer::kBitFieldSlot, Representation::Smi());
}
- static HObjectAccess ForExternalArrayExternalPointer() {
- return HObjectAccess::ForObservableJSObjectOffset(
- ExternalArray::kExternalPointerOffset, Representation::External());
- }
-
static HObjectAccess ForJSArrayBufferViewBuffer() {
return HObjectAccess::ForObservableJSObjectOffset(
JSArrayBufferView::kBufferOffset);
@@ -6529,15 +6616,9 @@ class HLoadKeyed final : public HTemplateInstruction<3>,
DECLARE_INSTRUCTION_FACTORY_P6(HLoadKeyed, HValue*, HValue*, HValue*,
ElementsKind, LoadKeyedHoleMode, int);
- bool is_external() const {
- return IsExternalArrayElementsKind(elements_kind());
- }
bool is_fixed_typed_array() const {
return IsFixedTypedArrayElementsKind(elements_kind());
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
HValue* elements() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
HValue* dependency() const {
@@ -6565,11 +6646,11 @@ class HLoadKeyed final : public HTemplateInstruction<3>,
Representation RequiredInputRepresentation(int index) override {
// kind_fast: tagged[int32] (none)
// kind_double: tagged[int32] (none)
- // kind_fixed_typed_array: tagged[int32] (none)
+ // kind_fixed_typed_array: external[int32] (none)
// kind_external: external[int32] (none)
if (index == 0) {
- return is_external() ? Representation::External()
- : Representation::Tagged();
+ return is_fixed_typed_array() ? Representation::External()
+ : Representation::Tagged();
}
if (index == 1) {
return ArrayInstructionInterface::KeyedAccessIndexRequirement(
@@ -6618,7 +6699,7 @@ class HLoadKeyed final : public HTemplateInstruction<3>,
SetOperandAt(1, key);
SetOperandAt(2, dependency != NULL ? dependency : obj);
- if (!is_typed_elements()) {
+ if (!is_fixed_typed_array()) {
// I can detect the case between storing double (holey and fast) and
// smi/object by looking at elements_kind_.
DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) ||
@@ -6644,18 +6725,15 @@ class HLoadKeyed final : public HTemplateInstruction<3>,
SetDependsOnFlag(kDoubleArrayElements);
}
} else {
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS ||
+ if (elements_kind == FLOAT32_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
set_representation(Representation::Double());
} else {
set_representation(Representation::Integer32());
}
- if (is_external()) {
+ if (is_fixed_typed_array()) {
SetDependsOnFlag(kExternalMemory);
- } else if (is_fixed_typed_array()) {
SetDependsOnFlag(kTypedArrayElements);
} else {
UNREACHABLE();
@@ -6974,6 +7052,39 @@ class HStoreNamedGeneric final : public HTemplateInstruction<3> {
};
+class HStoreGlobalViaContext final : public HTemplateInstruction<2> {
+ public:
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreGlobalViaContext, HValue*,
+ int, int, LanguageMode);
+ HValue* context() const { return OperandAt(0); }
+ HValue* value() const { return OperandAt(1); }
+ int depth() const { return depth_; }
+ int slot_index() const { return slot_index_; }
+ LanguageMode language_mode() const { return language_mode_; }
+
+ std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
+
+ Representation RequiredInputRepresentation(int index) override {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext)
+
+ private:
+ HStoreGlobalViaContext(HValue* context, HValue* value, int depth,
+ int slot_index, LanguageMode language_mode)
+ : depth_(depth), slot_index_(slot_index), language_mode_(language_mode) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
+ SetAllSideEffects();
+ }
+
+ int const depth_;
+ int const slot_index_;
+ LanguageMode const language_mode_;
+};
+
+
class HStoreKeyed final : public HTemplateInstruction<3>,
public ArrayInstructionInterface {
public:
@@ -6991,8 +7102,8 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
// kind_fixed_typed_array: tagged[int32] = (double | int32)
// kind_external: external[int32] = (double | int32)
if (index == 0) {
- return is_external() ? Representation::External()
- : Representation::Tagged();
+ return is_fixed_typed_array() ? Representation::External()
+ : Representation::Tagged();
} else if (index == 1) {
return ArrayInstructionInterface::KeyedAccessIndexRequirement(
OperandAt(1)->representation());
@@ -7017,24 +7128,16 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
return Representation::Smi();
}
- return IsExternalArrayElementsKind(kind) ||
- IsFixedTypedArrayElementsKind(kind)
- ? Representation::Integer32()
- : Representation::Tagged();
- }
-
- bool is_external() const {
- return IsExternalArrayElementsKind(elements_kind());
+ if (IsFixedTypedArrayElementsKind(kind)) {
+ return Representation::Integer32();
+ }
+ return Representation::Tagged();
}
bool is_fixed_typed_array() const {
return IsFixedTypedArrayElementsKind(elements_kind());
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
-
Representation observed_input_representation(int index) override {
if (index < 2) return RequiredInputRepresentation(index);
if (IsUninitialized()) {
@@ -7124,25 +7227,20 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
SetFlag(kTrackSideEffectDominators);
SetDependsOnFlag(kNewSpacePromotion);
}
- if (is_external()) {
- SetChangesFlag(kExternalMemory);
- SetFlag(kAllowUndefinedAsNaN);
- } else if (IsFastDoubleElementsKind(elements_kind)) {
+ if (IsFastDoubleElementsKind(elements_kind)) {
SetChangesFlag(kDoubleArrayElements);
} else if (IsFastSmiElementsKind(elements_kind)) {
SetChangesFlag(kArrayElements);
} else if (is_fixed_typed_array()) {
SetChangesFlag(kTypedArrayElements);
+ SetChangesFlag(kExternalMemory);
SetFlag(kAllowUndefinedAsNaN);
} else {
SetChangesFlag(kArrayElements);
}
- // EXTERNAL_{UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating.
- if ((elements_kind >= EXTERNAL_INT8_ELEMENTS &&
- elements_kind <= EXTERNAL_UINT32_ELEMENTS) ||
- (elements_kind >= UINT8_ELEMENTS &&
- elements_kind <= INT32_ELEMENTS)) {
+ // {UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating.
+ if (elements_kind >= UINT8_ELEMENTS && elements_kind <= INT32_ELEMENTS) {
SetFlag(kTruncatingToInt32);
}
}
diff --git a/deps/v8/src/hydrogen-uint32-analysis.cc b/deps/v8/src/hydrogen-uint32-analysis.cc
index 37f19ebdaf..c6cbc9bc35 100644
--- a/deps/v8/src/hydrogen-uint32-analysis.cc
+++ b/deps/v8/src/hydrogen-uint32-analysis.cc
@@ -10,10 +10,6 @@ namespace internal {
static bool IsUnsignedLoad(HLoadKeyed* instr) {
switch (instr->elements_kind()) {
- case EXTERNAL_UINT8_ELEMENTS:
- case EXTERNAL_UINT16_ELEMENTS:
- case EXTERNAL_UINT32_ELEMENTS:
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case UINT8_ELEMENTS:
case UINT16_ELEMENTS:
case UINT32_ELEMENTS:
@@ -50,14 +46,14 @@ bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) {
return true;
} else if (use->IsStoreKeyed()) {
HStoreKeyed* store = HStoreKeyed::cast(use);
- if (store->is_external()) {
+ if (store->is_fixed_typed_array()) {
// Storing a value into an external integer array is a bit level
// operation.
if (store->value() == val) {
// Clamping or a conversion to double should have beed inserted.
- DCHECK(store->elements_kind() != EXTERNAL_UINT8_CLAMPED_ELEMENTS);
- DCHECK(store->elements_kind() != EXTERNAL_FLOAT32_ELEMENTS);
- DCHECK(store->elements_kind() != EXTERNAL_FLOAT64_ELEMENTS);
+ DCHECK(store->elements_kind() != UINT8_CLAMPED_ELEMENTS);
+ DCHECK(store->elements_kind() != FLOAT32_ELEMENTS);
+ DCHECK(store->elements_kind() != FLOAT64_ELEMENTS);
return true;
}
}
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 2a0e2c3919..8984a6e9f3 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -10,7 +10,7 @@
#include "src/allocation-site-scopes.h"
#include "src/ast-numbering.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/hydrogen-bce.h"
#include "src/hydrogen-bch.h"
#include "src/hydrogen-canonicalize.h"
@@ -339,7 +339,7 @@ void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
DCHECK(IsLoopHeader() || first_ == NULL);
HEnvironment* incoming_env = pred->last_environment();
if (IsLoopHeader()) {
- DCHECK(phis()->length() == incoming_env->length());
+ DCHECK_EQ(phis()->length(), incoming_env->length());
for (int i = 0; i < phis_.length(); ++i) {
phis_[i]->AddInput(incoming_env->values()->at(i));
}
@@ -997,6 +997,13 @@ void HGraphBuilder::IfBuilder::Finish(HBasicBlock** then_continuation,
}
+void HGraphBuilder::IfBuilder::EndUnreachable() {
+ if (captured_) return;
+ Finish();
+ builder()->set_current_block(nullptr);
+}
+
+
void HGraphBuilder::IfBuilder::End() {
if (captured_) return;
Finish();
@@ -2025,6 +2032,99 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
}
+HValue* HGraphBuilder::BuildToObject(HValue* receiver) {
+ NoObservableSideEffectsScope scope(this);
+
+ // Create a joinable continuation.
+ HIfContinuation wrap(graph()->CreateBasicBlock(),
+ graph()->CreateBasicBlock());
+
+ // Determine the proper global constructor function required to wrap
+ // {receiver} into a JSValue, unless {receiver} is already a {JSReceiver}, in
+ // which case we just return it. Deopts to Runtime::kToObject if {receiver}
+ // is undefined or null.
+ IfBuilder receiver_is_smi(this);
+ receiver_is_smi.If<HIsSmiAndBranch>(receiver);
+ receiver_is_smi.Then();
+ {
+ // Use global Number function.
+ Push(Add<HConstant>(Context::NUMBER_FUNCTION_INDEX));
+ }
+ receiver_is_smi.Else();
+ {
+ // Determine {receiver} map and instance type.
+ HValue* receiver_map =
+ Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
+ HValue* receiver_instance_type = Add<HLoadNamedField>(
+ receiver_map, nullptr, HObjectAccess::ForMapInstanceType());
+
+ // First check whether {receiver} is already a spec object (fast case).
+ IfBuilder receiver_is_not_spec_object(this);
+ receiver_is_not_spec_object.If<HCompareNumericAndBranch>(
+ receiver_instance_type, Add<HConstant>(FIRST_SPEC_OBJECT_TYPE),
+ Token::LT);
+ receiver_is_not_spec_object.Then();
+ {
+ // Load the constructor function index from the {receiver} map.
+ HValue* constructor_function_index = Add<HLoadNamedField>(
+ receiver_map, nullptr,
+ HObjectAccess::ForMapInObjectPropertiesOrConstructorFunctionIndex());
+
+ // Check if {receiver} has a constructor (null and undefined have no
+ // constructors, so we deoptimize to the runtime to throw an exception).
+ IfBuilder constructor_function_index_is_invalid(this);
+ constructor_function_index_is_invalid.If<HCompareNumericAndBranch>(
+ constructor_function_index,
+ Add<HConstant>(Map::kNoConstructorFunctionIndex), Token::EQ);
+ constructor_function_index_is_invalid.ThenDeopt(
+ Deoptimizer::kUndefinedOrNullInToObject);
+ constructor_function_index_is_invalid.End();
+
+ // Use the global constructor function.
+ Push(constructor_function_index);
+ }
+ receiver_is_not_spec_object.JoinContinuation(&wrap);
+ }
+ receiver_is_smi.JoinContinuation(&wrap);
+
+ // Wrap the receiver if necessary.
+ IfBuilder if_wrap(this, &wrap);
+ if_wrap.Then();
+ {
+ // Grab the constructor function index.
+ HValue* constructor_index = Pop();
+
+ // Load native context.
+ HValue* native_context = BuildGetNativeContext();
+
+ // Determine the initial map for the global constructor.
+ HValue* constructor = Add<HLoadKeyed>(native_context, constructor_index,
+ nullptr, FAST_ELEMENTS);
+ HValue* constructor_initial_map = Add<HLoadNamedField>(
+ constructor, nullptr, HObjectAccess::ForPrototypeOrInitialMap());
+ // Allocate and initialize a JSValue wrapper.
+ HValue* value =
+ BuildAllocate(Add<HConstant>(JSValue::kSize), HType::JSObject(),
+ JS_VALUE_TYPE, HAllocationMode());
+ Add<HStoreNamedField>(value, HObjectAccess::ForMap(),
+ constructor_initial_map);
+ HValue* empty_fixed_array = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
+ Add<HStoreNamedField>(value, HObjectAccess::ForPropertiesPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(value, HObjectAccess::ForElementsPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(value, HObjectAccess::ForObservableJSObjectOffset(
+ JSValue::kValueOffset),
+ receiver);
+ Push(value);
+ }
+ if_wrap.Else();
+ { Push(receiver); }
+ if_wrap.End();
+ return Pop();
+}
+
+
HAllocate* HGraphBuilder::BuildAllocate(
HValue* object_size,
HType type,
@@ -2339,7 +2439,7 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
// Fallback to the runtime to add the two strings.
Add<HPushArguments>(left, right);
Push(Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kStringAddRT), 2));
+ Runtime::FunctionForId(Runtime::kStringAdd), 2));
}
if_sameencodingandsequential.End();
}
@@ -2409,9 +2509,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
KeyedAccessStoreMode store_mode) {
DCHECK(top_info()->IsStub() || checked_object->IsCompareMap() ||
checked_object->IsCheckMaps());
- DCHECK((!IsExternalArrayElementsKind(elements_kind) &&
- !IsFixedTypedArrayElementsKind(elements_kind)) ||
- !is_js_array);
+ DCHECK(!IsFixedTypedArrayElementsKind(elements_kind) || !is_js_array);
// No GVNFlag is necessary for ElementsKind if there is an explicit dependency
// on a HElementsTransition instruction. The flag can also be removed if the
// map to check has FAST_HOLEY_ELEMENTS, since there can be no further
@@ -2442,17 +2540,17 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
length->set_type(HType::Smi());
HValue* checked_key = NULL;
- if (IsExternalArrayElementsKind(elements_kind) ||
- IsFixedTypedArrayElementsKind(elements_kind)) {
+ if (IsFixedTypedArrayElementsKind(elements_kind)) {
checked_object = Add<HCheckArrayBufferNotNeutered>(checked_object);
- HValue* backing_store;
- if (IsExternalArrayElementsKind(elements_kind)) {
- backing_store = Add<HLoadNamedField>(
- elements, nullptr, HObjectAccess::ForExternalArrayExternalPointer());
- } else {
- backing_store = elements;
- }
+ HValue* external_pointer = Add<HLoadNamedField>(
+ elements, nullptr,
+ HObjectAccess::ForFixedTypedArrayBaseExternalPointer());
+ HValue* base_pointer = Add<HLoadNamedField>(
+ elements, nullptr, HObjectAccess::ForFixedTypedArrayBaseBasePointer());
+ HValue* backing_store = AddUncasted<HAdd>(
+ external_pointer, base_pointer, Strength::WEAK, AddOfExternalAndTagged);
+
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
NoObservableSideEffectsScope no_effects(this);
IfBuilder length_checker(this);
@@ -2684,8 +2782,7 @@ HInstruction* HGraphBuilder::AddElementAccess(
LoadKeyedHoleMode load_mode) {
if (access_type == STORE) {
DCHECK(val != NULL);
- if (elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
- elements_kind == UINT8_CLAMPED_ELEMENTS) {
+ if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
val = Add<HClampToUint8>(val);
}
return Add<HStoreKeyed>(elements, checked_key, val, elements_kind,
@@ -2696,8 +2793,7 @@ HInstruction* HGraphBuilder::AddElementAccess(
DCHECK(val == NULL);
HLoadKeyed* load = Add<HLoadKeyed>(
elements, checked_key, dependency, elements_kind, load_mode);
- if (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
- elements_kind == UINT32_ELEMENTS) {
+ if (elements_kind == UINT32_ELEMENTS) {
graph()->RecordUint32Instruction(load);
}
return load;
@@ -3134,6 +3230,17 @@ void HGraphBuilder::BuildCreateAllocationMemento(
}
+HInstruction* HGraphBuilder::BuildGetNativeContext() {
+ // Get the global object, then the native context
+ HValue* global_object = Add<HLoadNamedField>(
+ context(), nullptr,
+ HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ return Add<HLoadNamedField>(global_object, nullptr,
+ HObjectAccess::ForObservableJSObjectOffset(
+ GlobalObject::kNativeContextOffset));
+}
+
+
HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* closure) {
// Get the global object, then the native context
HInstruction* context = Add<HLoadNamedField>(
@@ -3157,14 +3264,51 @@ HInstruction* HGraphBuilder::BuildGetScriptContext(int context_index) {
}
-HInstruction* HGraphBuilder::BuildGetNativeContext() {
- // Get the global object, then the native context
- HValue* global_object = Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- return Add<HLoadNamedField>(global_object, nullptr,
- HObjectAccess::ForObservableJSObjectOffset(
- GlobalObject::kNativeContextOffset));
+HValue* HGraphBuilder::BuildGetParentContext(HValue* depth, int depth_value) {
+ HValue* script_context = context();
+ if (depth != NULL) {
+ HValue* zero = graph()->GetConstant0();
+
+ Push(script_context);
+ Push(depth);
+
+ LoopBuilder loop(this);
+ loop.BeginBody(2); // Drop script_context and depth from last environment
+ // to appease live range building without simulates.
+ depth = Pop();
+ script_context = Pop();
+
+ script_context = Add<HLoadNamedField>(
+ script_context, nullptr,
+ HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+ depth = AddUncasted<HSub>(depth, graph()->GetConstant1());
+ depth->ClearFlag(HValue::kCanOverflow);
+
+ IfBuilder if_break(this);
+ if_break.If<HCompareNumericAndBranch, HValue*>(depth, zero, Token::EQ);
+ if_break.Then();
+ {
+ Push(script_context); // The result.
+ loop.Break();
+ }
+ if_break.Else();
+ {
+ Push(script_context);
+ Push(depth);
+ }
+ loop.EndBody();
+ if_break.End();
+
+ script_context = Pop();
+ } else if (depth_value > 0) {
+ // Unroll the above loop.
+ for (int i = 0; i < depth_value; i++) {
+ script_context = Add<HLoadNamedField>(
+ script_context, nullptr,
+ HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+ }
+ }
+ return script_context;
}
@@ -3989,7 +4133,7 @@ AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
: owner_(owner),
kind_(kind),
outer_(owner->ast_context()),
- for_typeof_(false) {
+ typeof_mode_(NOT_INSIDE_TYPEOF) {
owner->set_ast_context(this); // Push.
#ifdef DEBUG
DCHECK(owner->environment()->frame_type() == JS_FUNCTION);
@@ -4237,7 +4381,7 @@ void HOptimizedGraphBuilder::VisitForValue(Expression* expr,
void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
- for_value.set_for_typeof(true);
+ for_value.set_typeof_mode(INSIDE_TYPEOF);
Visit(expr);
}
@@ -4267,7 +4411,7 @@ void HOptimizedGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs,
bool HOptimizedGraphBuilder::BuildGraph() {
- if (IsSubclassConstructor(current_info()->function()->kind())) {
+ if (IsSubclassConstructor(current_info()->literal()->kind())) {
Bailout(kSuperReference);
return false;
}
@@ -4302,17 +4446,12 @@ bool HOptimizedGraphBuilder::BuildGraph() {
body_entry->SetJoinId(BailoutId::FunctionEntry());
set_current_block(body_entry);
- // Handle implicit declaration of the function name in named function
- // expressions before other declarations.
- if (scope->is_function_scope() && scope->function() != NULL) {
- VisitVariableDeclaration(scope->function());
- }
VisitDeclarations(scope->declarations());
Add<HSimulate>(BailoutId::Declarations());
Add<HStackCheck>(HStackCheck::kFunctionEntry);
- VisitStatements(current_info()->function()->body());
+ VisitStatements(current_info()->literal()->body());
if (HasStackOverflow()) return false;
if (current_block() != NULL) {
@@ -5504,14 +5643,22 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
instr->SetDependsOnFlag(kGlobalVars);
return ast_context()->ReturnInstruction(instr, expr->id());
}
+ } else if (variable->IsGlobalSlot()) {
+ DCHECK(variable->index() > 0);
+ DCHECK(variable->IsStaticGlobalObjectProperty());
+ int slot_index = variable->index();
+ int depth = scope()->ContextChainLength(variable->scope());
+
+ HLoadGlobalViaContext* instr =
+ New<HLoadGlobalViaContext>(depth, slot_index);
+ return ast_context()->ReturnInstruction(instr, expr->id());
+
} else {
HValue* global_object = Add<HLoadNamedField>(
context(), nullptr,
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- HLoadGlobalGeneric* instr =
- New<HLoadGlobalGeneric>(global_object,
- variable->name(),
- ast_context()->is_for_typeof());
+ HLoadGlobalGeneric* instr = New<HLoadGlobalGeneric>(
+ global_object, variable->name(), ast_context()->typeof_mode());
instr->SetVectorAndSlot(handle(current_feedback_vector(), isolate()),
expr->VariableFeedbackSlot());
return ast_context()->ReturnInstruction(instr, expr->id());
@@ -5703,7 +5850,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// The object is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
Push(literal);
-
+ int store_slot_index = 0;
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->is_computed_name()) return Bailout(kComputedPropertyName);
@@ -5724,23 +5871,14 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
CHECK_ALIVE(VisitForValue(value));
HValue* value = Pop();
- // Add [[HomeObject]] to function literals.
- if (FunctionLiteral::NeedsHomeObject(property->value())) {
- Handle<Symbol> sym = isolate()->factory()->home_object_symbol();
- HInstruction* store_home = BuildKeyedGeneric(
- STORE, NULL, value, Add<HConstant>(sym), literal);
- AddInstruction(store_home);
- DCHECK(store_home->HasObservableSideEffects());
- Add<HSimulate>(property->value()->id(), REMOVABLE_SIMULATE);
- }
-
Handle<Map> map = property->GetReceiverType();
Handle<String> name = key->AsPropertyName();
HValue* store;
+ FeedbackVectorICSlot slot = expr->GetNthSlot(store_slot_index++);
if (map.is_null()) {
// If we don't know the monomorphic type, do a generic store.
- CHECK_ALIVE(store = BuildNamedGeneric(
- STORE, NULL, literal, name, value));
+ CHECK_ALIVE(store = BuildNamedGeneric(STORE, NULL, slot, literal,
+ name, value));
} else {
PropertyAccessInfo info(this, STORE, map, name);
if (info.CanAccessMonomorphic()) {
@@ -5750,8 +5888,8 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
&info, literal, checked_literal, value,
BailoutId::None(), BailoutId::None());
} else {
- CHECK_ALIVE(store = BuildNamedGeneric(
- STORE, NULL, literal, name, value));
+ CHECK_ALIVE(store = BuildNamedGeneric(STORE, NULL, slot,
+ literal, name, value));
}
}
if (store->IsInstruction()) {
@@ -5759,6 +5897,17 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
DCHECK(store->HasObservableSideEffects());
Add<HSimulate>(key->id(), REMOVABLE_SIMULATE);
+
+ // Add [[HomeObject]] to function literals.
+ if (FunctionLiteral::NeedsHomeObject(property->value())) {
+ Handle<Symbol> sym = isolate()->factory()->home_object_symbol();
+ HInstruction* store_home = BuildNamedGeneric(
+ STORE, NULL, expr->GetNthSlot(store_slot_index++), value, sym,
+ literal);
+ AddInstruction(store_home);
+ DCHECK(store_home->HasObservableSideEffects());
+ Add<HSimulate>(property->value()->id(), REMOVABLE_SIMULATE);
+ }
} else {
CHECK_ALIVE(VisitForEffect(value));
}
@@ -5773,6 +5922,9 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
+ // Crankshaft may not consume all the slots because it doesn't emit accessors.
+ DCHECK(!FLAG_vector_stores || store_slot_index <= expr->slot_count());
+
if (expr->has_function()) {
// Return the result of the transformation to fast properties
// instead of the original since this operation changes the map
@@ -6240,7 +6392,7 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
int descriptor = transition()->LastAdded();
int index =
transition()->instance_descriptors()->GetFieldIndex(descriptor) -
- map_->inobject_properties();
+ map_->GetInObjectProperties();
PropertyDetails details =
transition()->instance_descriptors()->GetDetails(descriptor);
Representation representation = details.representation();
@@ -6407,9 +6559,9 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
- PropertyAccessType access_type, Expression* expr, BailoutId ast_id,
- BailoutId return_id, HValue* object, HValue* value, SmallMapList* maps,
- Handle<String> name) {
+ PropertyAccessType access_type, Expression* expr, FeedbackVectorICSlot slot,
+ BailoutId ast_id, BailoutId return_id, HValue* object, HValue* value,
+ SmallMapList* maps, Handle<String> name) {
// Something did not match; must use a polymorphic load.
int count = 0;
HBasicBlock* join = NULL;
@@ -6527,8 +6679,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
FinishExitWithHardDeoptimization(
Deoptimizer::kUnknownMapInPolymorphicAccess);
} else {
- HInstruction* instr = BuildNamedGeneric(access_type, expr, object, name,
- value);
+ HInstruction* instr =
+ BuildNamedGeneric(access_type, expr, slot, object, name, value);
AddInstruction(instr);
if (!ast_context()->IsEffect()) Push(access_type == LOAD ? instr : value);
@@ -6576,10 +6728,9 @@ static bool AreStringTypes(SmallMapList* maps) {
}
-void HOptimizedGraphBuilder::BuildStore(Expression* expr,
- Property* prop,
- BailoutId ast_id,
- BailoutId return_id,
+void HOptimizedGraphBuilder::BuildStore(Expression* expr, Property* prop,
+ FeedbackVectorICSlot slot,
+ BailoutId ast_id, BailoutId return_id,
bool is_uninitialized) {
if (!prop->key()->IsPropertyName()) {
// Keyed store.
@@ -6587,8 +6738,9 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
HValue* key = Pop();
HValue* object = Pop();
bool has_side_effects = false;
- HValue* result = HandleKeyedElementAccess(
- object, key, value, expr, ast_id, return_id, STORE, &has_side_effects);
+ HValue* result =
+ HandleKeyedElementAccess(object, key, value, expr, slot, ast_id,
+ return_id, STORE, &has_side_effects);
if (has_side_effects) {
if (!ast_context()->IsEffect()) Push(value);
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -6606,8 +6758,8 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr,
Handle<String> name = Handle<String>::cast(key->value());
DCHECK(!name.is_null());
- HValue* access = BuildNamedAccess(STORE, ast_id, return_id, expr, object,
- name, value, is_uninitialized);
+ HValue* access = BuildNamedAccess(STORE, ast_id, return_id, expr, slot,
+ object, name, value, is_uninitialized);
if (access == NULL) return;
if (!ast_context()->IsEffect()) Push(value);
@@ -6628,7 +6780,7 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
CHECK_ALIVE(VisitForValue(prop->key()));
}
CHECK_ALIVE(VisitForValue(expr->value()));
- BuildStore(expr, prop, expr->id(),
+ BuildStore(expr, prop, expr->AssignmentSlot(), expr->id(),
expr->AssignmentId(), expr->IsUninitialized());
}
@@ -6637,8 +6789,7 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
// superclass of Assignment and CountOperation, we cannot just pass the
// owning expression instead of position and ast_id separately.
void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
- Variable* var,
- HValue* value,
+ Variable* var, HValue* value, FeedbackVectorICSlot ic_slot,
BailoutId ast_id) {
Handle<GlobalObject> global(current_info()->global_object());
@@ -6727,6 +6878,18 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
+ } else if (var->IsGlobalSlot()) {
+ DCHECK(var->index() > 0);
+ DCHECK(var->IsStaticGlobalObjectProperty());
+ int slot_index = var->index();
+ int depth = scope()->ContextChainLength(var->scope());
+
+ HStoreGlobalViaContext* instr = Add<HStoreGlobalViaContext>(
+ value, depth, slot_index, function_language_mode());
+ USE(instr);
+ DCHECK(instr->HasObservableSideEffects());
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+
} else {
HValue* global_object = Add<HLoadNamedField>(
context(), nullptr,
@@ -6734,6 +6897,11 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HStoreNamedGeneric* instr =
Add<HStoreNamedGeneric>(global_object, var->name(), value,
function_language_mode(), PREMONOMORPHIC);
+ if (FLAG_vector_stores) {
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+ instr->SetVectorAndSlot(vector, ic_slot);
+ }
USE(instr);
DCHECK(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -6762,8 +6930,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
switch (var->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED:
- HandleGlobalVariableAssignment(var,
- Top(),
+ HandleGlobalVariableAssignment(var, Top(), expr->AssignmentSlot(),
expr->AssignmentId());
break;
@@ -6839,7 +7006,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Push(BuildBinaryOperation(operation, left, right, PUSH_BEFORE_SIMULATE));
- BuildStore(expr, prop, expr->id(),
+ BuildStore(expr, prop, expr->AssignmentSlot(), expr->id(),
expr->AssignmentId(), expr->IsUninitialized());
} else {
return Bailout(kInvalidLhsInCompoundAssignment);
@@ -6890,8 +7057,7 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED:
CHECK_ALIVE(VisitForValue(expr->value()));
- HandleGlobalVariableAssignment(var,
- Top(),
+ HandleGlobalVariableAssignment(var, Top(), expr->AssignmentSlot(),
expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
@@ -7038,8 +7204,8 @@ HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* string) {
HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
- PropertyAccessType access_type, Expression* expr, HValue* object,
- Handle<String> name, HValue* value, bool is_uninitialized) {
+ PropertyAccessType access_type, Expression* expr, FeedbackVectorICSlot slot,
+ HValue* object, Handle<Name> name, HValue* value, bool is_uninitialized) {
if (is_uninitialized) {
Add<HDeoptimize>(
Deoptimizer::kInsufficientTypeFeedbackForGenericNamedAccess,
@@ -7048,7 +7214,6 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
if (access_type == LOAD) {
Handle<TypeFeedbackVector> vector =
handle(current_feedback_vector(), isolate());
- FeedbackVectorICSlot slot = expr->AsProperty()->PropertyFeedbackSlot();
if (!expr->AsProperty()->key()->IsPropertyName()) {
// It's possible that a keyed load of a constant string was converted
@@ -7067,19 +7232,36 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
result->SetVectorAndSlot(vector, slot);
return result;
} else {
- return New<HStoreNamedGeneric>(object, name, value,
- function_language_mode(), PREMONOMORPHIC);
+ if (FLAG_vector_stores &&
+ current_feedback_vector()->GetKind(slot) == Code::KEYED_STORE_IC) {
+ // It's possible that a keyed store of a constant string was converted
+ // to a named store. Here, at the last minute, we need to make sure to
+ // use a generic Keyed Store if we are using the type vector, because
+ // it has to share information with full code.
+ HConstant* key = Add<HConstant>(name);
+ HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
+ object, key, value, function_language_mode(), PREMONOMORPHIC);
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+ result->SetVectorAndSlot(vector, slot);
+ return result;
+ }
+
+ HStoreNamedGeneric* result = New<HStoreNamedGeneric>(
+ object, name, value, function_language_mode(), PREMONOMORPHIC);
+ if (FLAG_vector_stores) {
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+ result->SetVectorAndSlot(vector, slot);
+ }
+ return result;
}
}
-
HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
- PropertyAccessType access_type,
- Expression* expr,
- HValue* object,
- HValue* key,
- HValue* value) {
+ PropertyAccessType access_type, Expression* expr, FeedbackVectorICSlot slot,
+ HValue* object, HValue* key, HValue* value) {
if (access_type == LOAD) {
InlineCacheState initial_state = expr->AsProperty()->GetInlineCacheState();
HLoadKeyedGeneric* result = New<HLoadKeyedGeneric>(
@@ -7090,13 +7272,18 @@ HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
// We need to pass vector information.
Handle<TypeFeedbackVector> vector =
handle(current_feedback_vector(), isolate());
- FeedbackVectorICSlot slot = expr->AsProperty()->PropertyFeedbackSlot();
result->SetVectorAndSlot(vector, slot);
}
return result;
} else {
- return New<HStoreKeyedGeneric>(object, key, value, function_language_mode(),
- PREMONOMORPHIC);
+ HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
+ object, key, value, function_language_mode(), PREMONOMORPHIC);
+ if (FLAG_vector_stores) {
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+ result->SetVectorAndSlot(vector, slot);
+ }
+ return result;
}
}
@@ -7233,14 +7420,9 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
- Expression* expr,
- HValue* object,
- HValue* key,
- HValue* val,
- SmallMapList* maps,
- PropertyAccessType access_type,
- KeyedAccessStoreMode store_mode,
- bool* has_side_effects) {
+ Expression* expr, FeedbackVectorICSlot slot, HValue* object, HValue* key,
+ HValue* val, SmallMapList* maps, PropertyAccessType access_type,
+ KeyedAccessStoreMode store_mode, bool* has_side_effects) {
*has_side_effects = false;
BuildCheckHeapObject(object);
@@ -7268,8 +7450,8 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
possible_transitioned_maps.Add(map);
}
if (IsSloppyArgumentsElements(elements_kind)) {
- HInstruction* result = BuildKeyedGeneric(access_type, expr, object, key,
- val);
+ HInstruction* result =
+ BuildKeyedGeneric(access_type, expr, slot, object, key, val);
*has_side_effects = result->HasObservableSideEffects();
return AddInstruction(result);
}
@@ -7305,8 +7487,8 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
Handle<Map> untransitionable_map = untransitionable_maps[0];
HInstruction* instr = NULL;
if (!CanInlineElementAccess(untransitionable_map)) {
- instr = AddInstruction(BuildKeyedGeneric(access_type, expr, object, key,
- val));
+ instr = AddInstruction(
+ BuildKeyedGeneric(access_type, expr, slot, object, key, val));
} else {
instr = BuildMonomorphicElementAccess(
object, key, val, transition, untransitionable_map, access_type,
@@ -7330,11 +7512,10 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
set_current_block(this_map);
HInstruction* access = NULL;
if (!CanInlineElementAccess(map)) {
- access = AddInstruction(BuildKeyedGeneric(access_type, expr, object, key,
- val));
+ access = AddInstruction(
+ BuildKeyedGeneric(access_type, expr, slot, object, key, val));
} else {
DCHECK(IsFastElementsKind(elements_kind) ||
- IsExternalArrayElementsKind(elements_kind) ||
IsFixedTypedArrayElementsKind(elements_kind));
LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
// Happily, mapcompare is a checked object.
@@ -7371,9 +7552,9 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
- HValue* obj, HValue* key, HValue* val, Expression* expr, BailoutId ast_id,
- BailoutId return_id, PropertyAccessType access_type,
- bool* has_side_effects) {
+ HValue* obj, HValue* key, HValue* val, Expression* expr,
+ FeedbackVectorICSlot slot, BailoutId ast_id, BailoutId return_id,
+ PropertyAccessType access_type, bool* has_side_effects) {
if (key->ActualValue()->IsConstant()) {
Handle<Object> constant =
HConstant::cast(key->ActualValue())->handle(isolate());
@@ -7385,7 +7566,7 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
Handle<String>::cast(constant));
}
HValue* access =
- BuildNamedAccess(access_type, ast_id, return_id, expr, obj,
+ BuildNamedAccess(access_type, ast_id, return_id, expr, slot, obj,
Handle<String>::cast(constant), val, false);
if (access == NULL || access->IsPhi() ||
HInstruction::cast(access)->IsLinked()) {
@@ -7444,15 +7625,15 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
if (monomorphic) {
Handle<Map> map = maps->first();
if (!CanInlineElementAccess(map)) {
- instr = AddInstruction(BuildKeyedGeneric(access_type, expr, obj, key,
- val));
+ instr = AddInstruction(
+ BuildKeyedGeneric(access_type, expr, slot, obj, key, val));
} else {
BuildCheckHeapObject(obj);
instr = BuildMonomorphicElementAccess(
obj, key, val, NULL, map, access_type, expr->GetStoreMode());
}
} else if (!force_generic && (maps != NULL && !maps->is_empty())) {
- return HandlePolymorphicElementAccess(expr, obj, key, val, maps,
+ return HandlePolymorphicElementAccess(expr, slot, obj, key, val, maps,
access_type, expr->GetStoreMode(),
has_side_effects);
} else {
@@ -7468,7 +7649,8 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
Deoptimizer::SOFT);
}
}
- instr = AddInstruction(BuildKeyedGeneric(access_type, expr, obj, key, val));
+ instr = AddInstruction(
+ BuildKeyedGeneric(access_type, expr, slot, obj, key, val));
}
*has_side_effects = instr->HasObservableSideEffects();
return instr;
@@ -7556,8 +7738,8 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
HValue* HOptimizedGraphBuilder::BuildNamedAccess(
PropertyAccessType access, BailoutId ast_id, BailoutId return_id,
- Expression* expr, HValue* object, Handle<String> name, HValue* value,
- bool is_uninitialized) {
+ Expression* expr, FeedbackVectorICSlot slot, HValue* object,
+ Handle<String> name, HValue* value, bool is_uninitialized) {
SmallMapList* maps;
ComputeReceiverTypes(expr, object, &maps, zone());
DCHECK(maps != NULL);
@@ -7565,8 +7747,8 @@ HValue* HOptimizedGraphBuilder::BuildNamedAccess(
if (maps->length() > 0) {
PropertyAccessInfo info(this, access, maps->first(), name);
if (!info.CanAccessAsMonomorphic(maps)) {
- HandlePolymorphicNamedFieldAccess(access, expr, ast_id, return_id, object,
- value, maps, name);
+ HandlePolymorphicNamedFieldAccess(access, expr, slot, ast_id, return_id,
+ object, value, maps, name);
return NULL;
}
@@ -7584,7 +7766,8 @@ HValue* HOptimizedGraphBuilder::BuildNamedAccess(
&info, object, checked_object, value, ast_id, return_id);
}
- return BuildNamedGeneric(access, expr, object, name, value, is_uninitialized);
+ return BuildNamedGeneric(access, expr, slot, object, name, value,
+ is_uninitialized);
}
@@ -7612,8 +7795,9 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
HValue* object = Pop();
- HValue* value = BuildNamedAccess(LOAD, ast_id, expr->LoadId(), expr, object,
- name, NULL, expr->IsUninitialized());
+ HValue* value = BuildNamedAccess(LOAD, ast_id, expr->LoadId(), expr,
+ expr->PropertyFeedbackSlot(), object, name,
+ NULL, expr->IsUninitialized());
if (value == NULL) return;
if (value->IsPhi()) return ast_context()->ReturnValue(value);
instr = HInstruction::cast(value);
@@ -7625,7 +7809,8 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
bool has_side_effects = false;
HValue* load = HandleKeyedElementAccess(
- obj, key, NULL, expr, ast_id, expr->LoadId(), LOAD, &has_side_effects);
+ obj, key, NULL, expr, expr->PropertyFeedbackSlot(), ast_id,
+ expr->LoadId(), LOAD, &has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -7872,7 +8057,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
bool try_inline = FLAG_polymorphic_inlining && !needs_wrapping;
if (FLAG_trace_inlining && try_inline) {
Handle<JSFunction> caller = current_info()->closure();
- SmartArrayPointer<char> caller_name =
+ base::SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
PrintF("Trying to inline the polymorphic call to %s from %s\n",
name->ToCString().get(),
@@ -7908,8 +8093,9 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
FinishExitWithHardDeoptimization(Deoptimizer::kUnknownMapInPolymorphicCall);
} else {
Property* prop = expr->expression()->AsProperty();
- HInstruction* function = BuildNamedGeneric(
- LOAD, prop, receiver, name, NULL, prop->IsUninitialized());
+ HInstruction* function =
+ BuildNamedGeneric(LOAD, prop, prop->PropertyFeedbackSlot(), receiver,
+ name, NULL, prop->IsUninitialized());
AddInstruction(function);
Push(function);
AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
@@ -7954,9 +8140,9 @@ void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
Handle<JSFunction> caller,
const char* reason) {
if (FLAG_trace_inlining) {
- SmartArrayPointer<char> target_name =
+ base::SmartArrayPointer<char> target_name =
target->shared()->DebugName()->ToCString();
- SmartArrayPointer<char> caller_name =
+ base::SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
if (reason == NULL) {
PrintF("Inlined %s called from %s.\n", target_name.get(),
@@ -8077,6 +8263,10 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
CompilationInfo target_info(&parse_info);
Handle<SharedFunctionInfo> target_shared(target->shared());
+ if (target_shared->HasDebugInfo()) {
+ TraceInline(target, caller, "target is being debugged");
+ return false;
+ }
if (!Compiler::ParseAndAnalyze(target_info.parse_info())) {
if (target_info.isolate()->has_pending_exception()) {
// Parse or scope error, never optimize this function.
@@ -8091,7 +8281,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
TraceInline(target, caller, "target has context-allocated variables");
return false;
}
- FunctionLiteral* function = target_info.function();
+ FunctionLiteral* function = target_info.literal();
// The following conditions must be checked again after re-parsing, because
// earlier the information might not have been complete due to lazy parsing.
@@ -9556,9 +9746,9 @@ void HOptimizedGraphBuilder::BuildInlinedCallArray(
// Checks whether allocation using the given constructor can be inlined.
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
- constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
- constructor->initial_map()->instance_size() < HAllocate::kMaxInlineSize &&
- constructor->initial_map()->InitialPropertiesLength() == 0;
+ constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
+ constructor->initial_map()->instance_size() <
+ HAllocate::kMaxInlineSize;
}
@@ -9644,7 +9834,6 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
DCHECK(constructor->has_initial_map());
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
- DCHECK(initial_map->InitialPropertiesLength() == 0);
// Allocate an instance of the implicit receiver object.
HValue* size_in_bytes = Add<HConstant>(instance_size);
@@ -9723,9 +9912,9 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
void HOptimizedGraphBuilder::BuildInitializeInobjectProperties(
HValue* receiver, Handle<Map> initial_map) {
- if (initial_map->inobject_properties() != 0) {
+ if (initial_map->GetInObjectProperties() != 0) {
HConstant* undefined = graph()->GetConstantUndefined();
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
+ for (int i = 0; i < initial_map->GetInObjectProperties(); i++) {
int property_offset = initial_map->GetInObjectPropertyOffset(i);
Add<HStoreNamedField>(receiver, HObjectAccess::ForMapAndOffset(
initial_map, property_offset),
@@ -9866,14 +10055,14 @@ HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
bool is_zero_byte_offset,
HValue* buffer, HValue* byte_offset, HValue* length) {
Handle<Map> external_array_map(
- isolate()->heap()->MapForExternalArrayType(array_type));
+ isolate()->heap()->MapForFixedTypedArray(array_type));
// The HForceRepresentation is to prevent possible deopt on int-smi
// conversion after allocation but before the new object fields are set.
length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
- HValue* elements =
- Add<HAllocate>(Add<HConstant>(ExternalArray::kSize), HType::HeapObject(),
- NOT_TENURED, external_array_map->instance_type());
+ HValue* elements = Add<HAllocate>(
+ Add<HConstant>(FixedTypedArrayBase::kHeaderSize), HType::HeapObject(),
+ NOT_TENURED, external_array_map->instance_type());
AddStoreMapConstant(elements, external_array_map);
Add<HStoreNamedField>(elements,
@@ -9895,8 +10084,11 @@ HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
}
Add<HStoreNamedField>(elements,
- HObjectAccess::ForExternalArrayExternalPointer(),
- typed_array_start);
+ HObjectAccess::ForFixedTypedArrayBaseBasePointer(),
+ graph()->GetConstant0());
+ Add<HStoreNamedField>(elements,
+ HObjectAccess::ForFixedTypedArrayBaseExternalPointer(),
+ typed_array_start);
return elements;
}
@@ -9944,15 +10136,23 @@ HValue* HOptimizedGraphBuilder::BuildAllocateFixedTypedArray(
Add<HStoreNamedField>(
elements, HObjectAccess::ForFixedTypedArrayBaseBasePointer(), elements);
+ Add<HStoreNamedField>(
+ elements, HObjectAccess::ForFixedTypedArrayBaseExternalPointer(),
+ Add<HConstant>(ExternalReference::fixed_typed_array_base_data_offset()));
+
HValue* filler = Add<HConstant>(static_cast<int32_t>(0));
if (initialize) {
LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+ HValue* backing_store = AddUncasted<HAdd>(
+ Add<HConstant>(ExternalReference::fixed_typed_array_base_data_offset()),
+ elements, Strength::WEAK, AddOfExternalAndTagged);
+
HValue* key = builder.BeginBody(
Add<HConstant>(static_cast<int32_t>(0)),
length, Token::LT);
- Add<HStoreKeyed>(elements, key, filler, fixed_elements_kind);
+ Add<HStoreKeyed>(backing_store, key, filler, fixed_elements_kind);
builder.EndBody();
}
@@ -10035,13 +10235,10 @@ void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
ExternalArrayType array_type =
kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
- ElementsKind external_elements_kind = // Bogus initialization.
- EXTERNAL_INT8_ELEMENTS;
ElementsKind fixed_elements_kind = // Bogus initialization.
INT8_ELEMENTS;
Runtime::ArrayIdToTypeAndSize(array_id,
&array_type,
- &external_elements_kind,
&fixed_elements_kind,
&element_size);
@@ -10066,8 +10263,8 @@ void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
if (buffer != NULL) {
elements = BuildAllocateExternalElements(
array_type, is_zero_byte_offset, buffer, byte_offset, length);
- Handle<Map> obj_map = TypedArrayMap(
- isolate(), array_type, external_elements_kind);
+ Handle<Map> obj_map =
+ TypedArrayMap(isolate(), array_type, fixed_elements_kind);
AddStoreMapConstant(obj, obj_map);
} else {
DCHECK(is_zero_byte_offset);
@@ -10213,11 +10410,13 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(prop->key()));
HValue* key = Pop();
HValue* obj = Pop();
- HValue* function = AddLoadJSBuiltin(Builtins::DELETE);
- Add<HPushArguments>(obj, key, Add<HConstant>(function_language_mode()));
- // TODO(olivf) InvokeFunction produces a check for the parameter count,
- // even though we are certain to pass the correct number of arguments here.
- HInstruction* instr = New<HInvokeFunction>(function, 3);
+ Add<HPushArguments>(obj, key);
+ HInstruction* instr = New<HCallRuntime>(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(is_strict(function_language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy),
+ 2);
return ast_context()->ReturnInstruction(instr, expr->id());
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -10355,18 +10554,15 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
}
-void HOptimizedGraphBuilder::BuildStoreForEffect(Expression* expr,
- Property* prop,
- BailoutId ast_id,
- BailoutId return_id,
- HValue* object,
- HValue* key,
- HValue* value) {
+void HOptimizedGraphBuilder::BuildStoreForEffect(
+ Expression* expr, Property* prop, FeedbackVectorICSlot slot,
+ BailoutId ast_id, BailoutId return_id, HValue* object, HValue* key,
+ HValue* value) {
EffectContext for_effect(this);
Push(object);
if (key != NULL) Push(key);
Push(value);
- BuildStore(expr, prop, ast_id, return_id);
+ BuildStore(expr, prop, slot, ast_id, return_id);
}
@@ -10409,8 +10605,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
switch (var->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED:
- HandleGlobalVariableAssignment(var,
- after,
+ HandleGlobalVariableAssignment(var, after, expr->CountSlot(),
expr->AssignmentId());
break;
@@ -10476,13 +10671,14 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
// Drop object and key to push it again in the effect context below.
Drop(key == NULL ? 1 : 2);
environment()->SetExpressionStackAt(0, input);
- CHECK_ALIVE(BuildStoreForEffect(
- expr, prop, expr->id(), expr->AssignmentId(), object, key, after));
+ CHECK_ALIVE(BuildStoreForEffect(expr, prop, expr->CountSlot(), expr->id(),
+ expr->AssignmentId(), object, key, after));
return ast_context()->ReturnValue(Pop());
}
environment()->SetExpressionStackAt(0, after);
- return BuildStore(expr, prop, expr->id(), expr->AssignmentId());
+ return BuildStore(expr, prop, expr->CountSlot(), expr->id(),
+ expr->AssignmentId());
}
@@ -10709,40 +10905,43 @@ HValue* HGraphBuilder::BuildBinaryOperation(
// Special case for string addition here.
if (op == Token::ADD &&
(left_type->Is(Type::String()) || right_type->Is(Type::String()))) {
- // Validate type feedback for left argument.
- if (left_type->Is(Type::String())) {
+ if (is_strong(strength)) {
+ // In strong mode, if the one side of an addition is a string,
+ // the other side must be a string too.
left = BuildCheckString(left);
- }
-
- // Validate type feedback for right argument.
- if (right_type->Is(Type::String())) {
right = BuildCheckString(right);
- }
+ } else {
+ // Validate type feedback for left argument.
+ if (left_type->Is(Type::String())) {
+ left = BuildCheckString(left);
+ }
- // Convert left argument as necessary.
- if (left_type->Is(Type::Number()) && !is_strong(strength)) {
- DCHECK(right_type->Is(Type::String()));
- left = BuildNumberToString(left, left_type);
- } else if (!left_type->Is(Type::String())) {
- DCHECK(right_type->Is(Type::String()));
- HValue* function = AddLoadJSBuiltin(
- is_strong(strength) ? Builtins::STRING_ADD_RIGHT_STRONG
- : Builtins::STRING_ADD_RIGHT);
- Add<HPushArguments>(left, right);
- return AddUncasted<HInvokeFunction>(function, 2);
- }
-
- // Convert right argument as necessary.
- if (right_type->Is(Type::Number()) && !is_strong(strength)) {
- DCHECK(left_type->Is(Type::String()));
- right = BuildNumberToString(right, right_type);
- } else if (!right_type->Is(Type::String())) {
- DCHECK(left_type->Is(Type::String()));
- HValue* function = AddLoadJSBuiltin(is_strong(strength)
- ? Builtins::STRING_ADD_LEFT_STRONG
- : Builtins::STRING_ADD_LEFT);
- Add<HPushArguments>(left, right);
- return AddUncasted<HInvokeFunction>(function, 2);
+ // Validate type feedback for right argument.
+ if (right_type->Is(Type::String())) {
+ right = BuildCheckString(right);
+ }
+
+ // Convert left argument as necessary.
+ if (left_type->Is(Type::Number())) {
+ DCHECK(right_type->Is(Type::String()));
+ left = BuildNumberToString(left, left_type);
+ } else if (!left_type->Is(Type::String())) {
+ DCHECK(right_type->Is(Type::String()));
+ HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_RIGHT);
+ Add<HPushArguments>(left, right);
+ return AddUncasted<HInvokeFunction>(function, 2);
+ }
+
+ // Convert right argument as necessary.
+ if (right_type->Is(Type::Number())) {
+ DCHECK(left_type->Is(Type::String()));
+ right = BuildNumberToString(right, right_type);
+ } else if (!right_type->Is(Type::String())) {
+ DCHECK(left_type->Is(Type::String()));
+ HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_LEFT);
+ Add<HPushArguments>(left, right);
+ return AddUncasted<HInvokeFunction>(function, 2);
+ }
}
// Fast paths for empty constant strings.
@@ -11533,7 +11732,7 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
}
}
- int inobject_properties = boilerplate_object->map()->inobject_properties();
+ int inobject_properties = boilerplate_object->map()->GetInObjectProperties();
HInstruction* value_instruction =
Add<HConstant>(isolate()->factory()->one_pointer_filler_map());
for (int i = copied_fields; i < inobject_properties; i++) {
@@ -11840,6 +12039,15 @@ void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateToObject(CallRuntime* call) {
+ DCHECK_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HValue* result = BuildToObject(value);
+ return ast_context()->ReturnValue(result);
+}
+
+
void HOptimizedGraphBuilder::GenerateIsJSProxy(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -11892,15 +12100,6 @@ void HOptimizedGraphBuilder::GenerateHasFastPackedElements(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HIsUndetectableAndBranch* result = New<HIsUndetectableAndBranch>(value);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
// Support for construct call checks.
void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
DCHECK(call->arguments()->length() == 0);
@@ -12240,12 +12439,6 @@ void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
}
-// Support for fast native caches.
-void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
- return Bailout(kInlinedRuntimeFunctionGetFromCache);
-}
-
-
// Fast support for number to string.
void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
DCHECK_EQ(1, call->arguments()->length());
@@ -12918,7 +13111,7 @@ std::ostream& operator<<(std::ostream& os, const HEnvironment& env) {
void HTracer::TraceCompilation(CompilationInfo* info) {
Tag tag(this, "compilation");
if (info->IsOptimizing()) {
- Handle<String> name = info->function()->debug_name();
+ Handle<String> name = info->literal()->debug_name();
PrintStringProperty("name", name->ToCString().get());
PrintIndent();
trace_.Add("method \"%s:%d\"\n",
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index 65e54e652b..c6953cdad5 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -5,8 +5,6 @@
#ifndef V8_HYDROGEN_H_
#define V8_HYDROGEN_H_
-#include "src/v8.h"
-
#include "src/accessors.h"
#include "src/allocation.h"
#include "src/ast.h"
@@ -758,8 +756,8 @@ class AstContext {
virtual void ReturnContinuation(HIfContinuation* continuation,
BailoutId ast_id) = 0;
- void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
- bool is_for_typeof() { return for_typeof_; }
+ void set_typeof_mode(TypeofMode typeof_mode) { typeof_mode_ = typeof_mode; }
+ TypeofMode typeof_mode() { return typeof_mode_; }
protected:
AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind);
@@ -779,7 +777,7 @@ class AstContext {
HOptimizedGraphBuilder* owner_;
Expression::Context kind_;
AstContext* outer_;
- bool for_typeof_;
+ TypeofMode typeof_mode_;
};
@@ -1329,6 +1327,7 @@ class HGraphBuilder {
bool is_jsarray);
HValue* BuildNumberToString(HValue* object, Type* type);
+ HValue* BuildToObject(HValue* receiver);
void BuildJSObjectCheck(HValue* receiver,
int bit_field_mask);
@@ -1596,6 +1595,7 @@ class HGraphBuilder {
void Then();
void Else();
void End();
+ void EndUnreachable();
void Deopt(Deoptimizer::DeoptReason reason);
void ThenDeopt(Deoptimizer::DeoptReason reason) {
@@ -1860,6 +1860,10 @@ class HGraphBuilder {
HInstruction* BuildGetNativeContext(HValue* closure);
HInstruction* BuildGetNativeContext();
HInstruction* BuildGetScriptContext(int context_index);
+ // Builds a loop version if |depth| is specified or unrolls the loop to
+ // |depth_value| iterations otherwise.
+ HValue* BuildGetParentContext(HValue* depth, int depth_value);
+
HInstruction* BuildGetArrayFunction();
HValue* BuildArrayBufferViewFieldAccessor(HValue* object,
HValue* checked_object,
@@ -2191,8 +2195,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(TwoByteSeqStringSetChar) \
F(ObjectEquals) \
F(IsObject) \
+ F(ToObject) \
F(IsFunction) \
- F(IsUndetectableObject) \
F(IsSpecObject) \
F(MathPow) \
F(IsMinusZero) \
@@ -2206,7 +2210,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(StringCompare) \
F(RegExpExec) \
F(RegExpConstructResult) \
- F(GetFromCache) \
F(NumberToString) \
F(DebugIsActive) \
F(Likely) \
@@ -2448,20 +2451,16 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
Handle<JSFunction> caller,
const char* failure_reason);
- void HandleGlobalVariableAssignment(Variable* var,
- HValue* value,
+ void HandleGlobalVariableAssignment(Variable* var, HValue* value,
+ FeedbackVectorICSlot ic_slot,
BailoutId ast_id);
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
- void HandlePolymorphicNamedFieldAccess(PropertyAccessType access_type,
- Expression* expr,
- BailoutId ast_id,
- BailoutId return_id,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name);
+ void HandlePolymorphicNamedFieldAccess(
+ PropertyAccessType access_type, Expression* expr,
+ FeedbackVectorICSlot slot, BailoutId ast_id, BailoutId return_id,
+ HValue* object, HValue* value, SmallMapList* types, Handle<String> name);
HValue* BuildAllocateExternalElements(
ExternalArrayType array_type,
@@ -2637,7 +2636,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
lookup_type_ == TRANSITION_TYPE);
DCHECK(number_ < map->NumberOfOwnDescriptors());
int field_index = map->instance_descriptors()->GetFieldIndex(number_);
- return field_index - map->inobject_properties();
+ return field_index - map->GetInObjectProperties();
}
void LookupDescriptor(Map* map, Name* name) {
@@ -2710,7 +2709,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* BuildNamedAccess(PropertyAccessType access, BailoutId ast_id,
BailoutId reutrn_id, Expression* expr,
- HValue* object, Handle<String> name, HValue* value,
+ FeedbackVectorICSlot slot, HValue* object,
+ Handle<String> name, HValue* value,
bool is_uninitialized = false);
void HandlePolymorphicCallNamed(Call* expr,
@@ -2746,10 +2746,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
- Expression* expr,
- HValue* object,
- HValue* key,
- HValue* value);
+ Expression* expr, FeedbackVectorICSlot slot,
+ HValue* object, HValue* key, HValue* value);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
HValue* key,
@@ -2766,24 +2764,21 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
PropertyAccessType access_type,
KeyedAccessStoreMode store_mode);
- HValue* HandlePolymorphicElementAccess(Expression* expr,
- HValue* object,
- HValue* key,
- HValue* val,
- SmallMapList* maps,
- PropertyAccessType access_type,
- KeyedAccessStoreMode store_mode,
- bool* has_side_effects);
+ HValue* HandlePolymorphicElementAccess(
+ Expression* expr, FeedbackVectorICSlot slot, HValue* object, HValue* key,
+ HValue* val, SmallMapList* maps, PropertyAccessType access_type,
+ KeyedAccessStoreMode store_mode, bool* has_side_effects);
HValue* HandleKeyedElementAccess(HValue* obj, HValue* key, HValue* val,
- Expression* expr, BailoutId ast_id,
- BailoutId return_id,
+ Expression* expr, FeedbackVectorICSlot slot,
+ BailoutId ast_id, BailoutId return_id,
PropertyAccessType access_type,
bool* has_side_effects);
HInstruction* BuildNamedGeneric(PropertyAccessType access, Expression* expr,
- HValue* object, Handle<String> name,
- HValue* value, bool is_uninitialized = false);
+ FeedbackVectorICSlot slot, HValue* object,
+ Handle<Name> name, HValue* value,
+ bool is_uninitialized = false);
HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
@@ -2793,19 +2788,14 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* object,
HValue* key);
- void BuildStoreForEffect(Expression* expression,
- Property* prop,
- BailoutId ast_id,
- BailoutId return_id,
- HValue* object,
- HValue* key,
+ void BuildStoreForEffect(Expression* expression, Property* prop,
+ FeedbackVectorICSlot slot, BailoutId ast_id,
+ BailoutId return_id, HValue* object, HValue* key,
HValue* value);
- void BuildStore(Expression* expression,
- Property* prop,
- BailoutId ast_id,
- BailoutId return_id,
- bool is_uninitialized = false);
+ void BuildStore(Expression* expression, Property* prop,
+ FeedbackVectorICSlot slot, BailoutId ast_id,
+ BailoutId return_id, bool is_uninitialized = false);
HInstruction* BuildLoadNamedField(PropertyAccessInfo* info,
HValue* checked_object);
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 1d735c97f1..7899f2937d 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -5,6 +5,9 @@
#include "src/i18n.h"
+#include "src/api.h"
+#include "src/factory.h"
+#include "src/isolate.h"
#include "unicode/brkiter.h"
#include "unicode/calendar.h"
#include "unicode/coll.h"
@@ -258,7 +261,24 @@ icu::DecimalFormat* CreateICUNumberFormat(
#endif
number_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createInstance(icu_locale, format_style, status));
+ icu::NumberFormat::createInstance(icu_locale, format_style, status));
+
+ if (U_FAILURE(status)) {
+ delete number_format;
+ return NULL;
+ }
+
+ UErrorCode status_digits = U_ZERO_ERROR;
+ uint32_t fraction_digits = ucurr_getDefaultFractionDigits(
+ currency.getTerminatedBuffer(), &status_digits);
+ if (U_SUCCESS(status_digits)) {
+ number_format->setMinimumFractionDigits(fraction_digits);
+ number_format->setMaximumFractionDigits(fraction_digits);
+ } else {
+ // Set min & max to default values (previously in i18n.js)
+ number_format->setMinimumFractionDigits(0);
+ number_format->setMaximumFractionDigits(3);
+ }
} else if (style == UNICODE_STRING_SIMPLE("percent")) {
number_format = static_cast<icu::DecimalFormat*>(
icu::NumberFormat::createPercentInstance(icu_locale, status));
diff --git a/deps/v8/src/i18n.h b/deps/v8/src/i18n.h
index a50c43a429..ea8380baa7 100644
--- a/deps/v8/src/i18n.h
+++ b/deps/v8/src/i18n.h
@@ -6,7 +6,7 @@
#ifndef V8_I18N_H_
#define V8_I18N_H_
-#include "src/v8.h"
+#include "src/handles.h"
#include "unicode/uversion.h"
namespace U_ICU_NAMESPACE {
@@ -19,6 +19,9 @@ class SimpleDateFormat;
namespace v8 {
namespace internal {
+// Forward declarations.
+class ObjectTemplateInfo;
+
class I18N {
public:
// Creates an ObjectTemplate with one internal field.
diff --git a/deps/v8/src/i18n.js b/deps/v8/src/i18n.js
index 79e988062e..5fd32c8b40 100644
--- a/deps/v8/src/i18n.js
+++ b/deps/v8/src/i18n.js
@@ -17,19 +17,15 @@
// -------------------------------------------------------------------
// Imports
+var ArrayIndexOf;
+var ArrayJoin;
+var IsFinite;
+var IsNaN;
var GlobalBoolean = global.Boolean;
var GlobalDate = global.Date;
var GlobalNumber = global.Number;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
-var ObjectDefineProperties = utils.ObjectDefineProperties;
-var ObjectDefineProperty = utils.ObjectDefineProperty;
-var SetFunctionName = utils.SetFunctionName;
-
-var ArrayIndexOf;
-var ArrayJoin;
-var IsFinite;
-var IsNaN;
var MathFloor;
var RegExpTest;
var StringIndexOf;
@@ -54,6 +50,12 @@ utils.Import(function(from) {
StringSplit = from.StringSplit;
StringSubstr = from.StringSubstr;
StringSubstring = from.StringSubstring;
+ ToNumber = from.ToNumber;
+});
+
+utils.ImportNow(function(from) {
+ ObjectDefineProperties = from.ObjectDefineProperties;
+ ObjectDefineProperty = from.ObjectDefineProperty;
});
// -------------------------------------------------------------------
@@ -218,7 +220,7 @@ function addBoundMethod(obj, methodName, implementation, length) {
}
}
}
- SetFunctionName(boundMethod, internalName);
+ %FunctionSetName(boundMethod, internalName);
%FunctionRemovePrototype(boundMethod);
%SetNativeFlag(boundMethod);
this[internalName] = boundMethod;
@@ -226,7 +228,7 @@ function addBoundMethod(obj, methodName, implementation, length) {
return this[internalName];
}
- SetFunctionName(getter, methodName);
+ %FunctionSetName(getter, methodName);
%FunctionRemovePrototype(getter);
%SetNativeFlag(getter);
@@ -251,7 +253,7 @@ function supportedLocalesOf(service, locales, options) {
if (IS_UNDEFINED(options)) {
options = {};
} else {
- options = $toObject(options);
+ options = TO_OBJECT(options);
}
var matcher = options.localeMatcher;
@@ -717,7 +719,7 @@ function initializeLocaleList(locales) {
return freezeArray(seen);
}
- var o = $toObject(locales);
+ var o = TO_OBJECT(locales);
var len = TO_UINT32(o.length);
for (var k = 0; k < len; k++) {
@@ -951,7 +953,7 @@ function initializeCollator(collator, locales, options) {
return new Intl.Collator(locales, options);
}
- return initializeCollator($toObject(this), locales, options);
+ return initializeCollator(TO_OBJECT(this), locales, options);
},
DONT_ENUM
);
@@ -985,7 +987,7 @@ function initializeCollator(collator, locales, options) {
},
DONT_ENUM
);
-SetFunctionName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
+%FunctionSetName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
%FunctionRemovePrototype(Intl.Collator.prototype.resolvedOptions);
%SetNativeFlag(Intl.Collator.prototype.resolvedOptions);
@@ -1005,7 +1007,7 @@ SetFunctionName(Intl.Collator.prototype.resolvedOptions, 'resolvedOptions');
},
DONT_ENUM
);
-SetFunctionName(Intl.Collator.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionSetName(Intl.Collator.supportedLocalesOf, 'supportedLocalesOf');
%FunctionRemovePrototype(Intl.Collator.supportedLocalesOf);
%SetNativeFlag(Intl.Collator.supportedLocalesOf);
@@ -1099,11 +1101,19 @@ function initializeNumberFormat(numberFormat, locales, options) {
var mnid = getNumberOption(options, 'minimumIntegerDigits', 1, 21, 1);
defineWEProperty(internalOptions, 'minimumIntegerDigits', mnid);
- var mnfd = getNumberOption(options, 'minimumFractionDigits', 0, 20, 0);
- defineWEProperty(internalOptions, 'minimumFractionDigits', mnfd);
+ var mnfd = options['minimumFractionDigits'];
+ var mxfd = options['maximumFractionDigits'];
+ if (!IS_UNDEFINED(mnfd) || !internalOptions.style === 'currency') {
+ mnfd = getNumberOption(options, 'minimumFractionDigits', 0, 20, 0);
+ defineWEProperty(internalOptions, 'minimumFractionDigits', mnfd);
+ }
- var mxfd = getNumberOption(options, 'maximumFractionDigits', mnfd, 20, 3);
- defineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
+ if (!IS_UNDEFINED(mxfd) || !internalOptions.style === 'currency') {
+ mnfd = IS_UNDEFINED(mnfd) ? 0 : mnfd;
+ fallback_limit = (mnfd > 3) ? mnfd : 3;
+ mxfd = getNumberOption(options, 'maximumFractionDigits', mnfd, 20, fallback_limit);
+ defineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
+ }
var mnsd = options['minimumSignificantDigits'];
var mxsd = options['maximumSignificantDigits'];
@@ -1157,8 +1167,6 @@ function initializeNumberFormat(numberFormat, locales, options) {
internalOptions,
resolved);
- // We can't get information about number or currency style from ICU, so we
- // assume user request was fulfilled.
if (internalOptions.style === 'currency') {
ObjectDefineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
writable: true});
@@ -1186,7 +1194,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
return new Intl.NumberFormat(locales, options);
}
- return initializeNumberFormat($toObject(this), locales, options);
+ return initializeNumberFormat(TO_OBJECT(this), locales, options);
},
DONT_ENUM
);
@@ -1238,7 +1246,8 @@ function initializeNumberFormat(numberFormat, locales, options) {
},
DONT_ENUM
);
-SetFunctionName(Intl.NumberFormat.prototype.resolvedOptions, 'resolvedOptions');
+%FunctionSetName(Intl.NumberFormat.prototype.resolvedOptions,
+ 'resolvedOptions');
%FunctionRemovePrototype(Intl.NumberFormat.prototype.resolvedOptions);
%SetNativeFlag(Intl.NumberFormat.prototype.resolvedOptions);
@@ -1258,7 +1267,7 @@ SetFunctionName(Intl.NumberFormat.prototype.resolvedOptions, 'resolvedOptions');
},
DONT_ENUM
);
-SetFunctionName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionSetName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
%FunctionRemovePrototype(Intl.NumberFormat.supportedLocalesOf);
%SetNativeFlag(Intl.NumberFormat.supportedLocalesOf);
@@ -1270,7 +1279,7 @@ SetFunctionName(Intl.NumberFormat.supportedLocalesOf, 'supportedLocalesOf');
*/
function formatNumber(formatter, value) {
// Spec treats -0 and +0 as 0.
- var number = $toNumber(value) + 0;
+ var number = ToNumber(value) + 0;
return %InternalNumberFormat(%GetImplFromInitializedIntlObject(formatter),
number);
@@ -1438,7 +1447,7 @@ function toDateTimeOptions(options, required, defaults) {
if (IS_UNDEFINED(options)) {
options = {};
} else {
- options = TO_OBJECT_INLINE(options);
+ options = TO_OBJECT(options);
}
var needsDefault = true;
@@ -1588,7 +1597,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
return new Intl.DateTimeFormat(locales, options);
}
- return initializeDateTimeFormat($toObject(this), locales, options);
+ return initializeDateTimeFormat(TO_OBJECT(this), locales, options);
},
DONT_ENUM
);
@@ -1659,8 +1668,8 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
},
DONT_ENUM
);
-SetFunctionName(Intl.DateTimeFormat.prototype.resolvedOptions,
- 'resolvedOptions');
+%FunctionSetName(Intl.DateTimeFormat.prototype.resolvedOptions,
+ 'resolvedOptions');
%FunctionRemovePrototype(Intl.DateTimeFormat.prototype.resolvedOptions);
%SetNativeFlag(Intl.DateTimeFormat.prototype.resolvedOptions);
@@ -1680,7 +1689,7 @@ SetFunctionName(Intl.DateTimeFormat.prototype.resolvedOptions,
},
DONT_ENUM
);
-SetFunctionName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionSetName(Intl.DateTimeFormat.supportedLocalesOf, 'supportedLocalesOf');
%FunctionRemovePrototype(Intl.DateTimeFormat.supportedLocalesOf);
%SetNativeFlag(Intl.DateTimeFormat.supportedLocalesOf);
@@ -1695,7 +1704,7 @@ function formatDate(formatter, dateValue) {
if (IS_UNDEFINED(dateValue)) {
dateMs = %DateCurrentTime();
} else {
- dateMs = $toNumber(dateValue);
+ dateMs = ToNumber(dateValue);
}
if (!IsFinite(dateMs)) throw MakeRangeError(kDateRange);
@@ -1808,7 +1817,7 @@ function initializeBreakIterator(iterator, locales, options) {
return new Intl.v8BreakIterator(locales, options);
}
- return initializeBreakIterator($toObject(this), locales, options);
+ return initializeBreakIterator(TO_OBJECT(this), locales, options);
},
DONT_ENUM
);
@@ -1838,8 +1847,8 @@ function initializeBreakIterator(iterator, locales, options) {
},
DONT_ENUM
);
-SetFunctionName(Intl.v8BreakIterator.prototype.resolvedOptions,
- 'resolvedOptions');
+%FunctionSetName(Intl.v8BreakIterator.prototype.resolvedOptions,
+ 'resolvedOptions');
%FunctionRemovePrototype(Intl.v8BreakIterator.prototype.resolvedOptions);
%SetNativeFlag(Intl.v8BreakIterator.prototype.resolvedOptions);
@@ -1860,7 +1869,7 @@ SetFunctionName(Intl.v8BreakIterator.prototype.resolvedOptions,
},
DONT_ENUM
);
-SetFunctionName(Intl.v8BreakIterator.supportedLocalesOf, 'supportedLocalesOf');
+%FunctionSetName(Intl.v8BreakIterator.supportedLocalesOf, 'supportedLocalesOf');
%FunctionRemovePrototype(Intl.v8BreakIterator.supportedLocalesOf);
%SetNativeFlag(Intl.v8BreakIterator.supportedLocalesOf);
@@ -1956,7 +1965,7 @@ function OverrideFunction(object, name, f) {
writeable: true,
configurable: true,
enumerable: false });
- SetFunctionName(f, name);
+ %FunctionSetName(f, name);
%FunctionRemovePrototype(f);
%SetNativeFlag(f);
}
@@ -1989,14 +1998,17 @@ OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
* If the form is not one of "NFC", "NFD", "NFKC", or "NFKD", then throw
* a RangeError Exception.
*/
-OverrideFunction(GlobalString.prototype, 'normalize', function(that) {
+
+OverrideFunction(GlobalString.prototype, 'normalize', function() {
if (%_IsConstructCall()) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
+ var s = TO_STRING_INLINE(this);
- var form = GlobalString(%_Arguments(0) || 'NFC');
+ var formArg = %_Arguments(0);
+ var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING_INLINE(formArg);
var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
@@ -2007,7 +2019,7 @@ OverrideFunction(GlobalString.prototype, 'normalize', function(that) {
%_CallFunction(NORMALIZATION_FORMS, ', ', ArrayJoin));
}
- return %StringNormalize(this, normalizationForm);
+ return %StringNormalize(s, normalizationForm);
}
);
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 5a4036627d..70b7a6727b 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -40,7 +40,7 @@
#include "src/ia32/assembler-ia32.h"
#include "src/assembler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
namespace v8 {
namespace internal {
@@ -53,35 +53,25 @@ static const int kNoCodeAgeSequenceLength = 5;
// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
- bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH;
+void RelocInfo::apply(intptr_t delta) {
if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
- if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
- } else if (rmode_ == CODE_AGE_SEQUENCE) {
+ } else if (IsCodeAgeSequence(rmode_)) {
if (*pc_ == kCallOpcode) {
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
- if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
}
- } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
- // Special handling of js_return when a break point is set (call
- // instruction has been inserted).
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= delta; // Relocate entry.
- if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
- } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
+ } else if (IsDebugBreakSlot(rmode_) && IsPatchedDebugBreakSlotSequence()) {
// Special handling of a debug break slot when a break point is set (call
// instruction has been inserted).
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ int32_t* p = reinterpret_cast<int32_t*>(
+ pc_ + Assembler::kPatchDebugBreakSlotAddressOffset);
*p -= delta; // Relocate entry.
- if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
} else if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p += delta; // Relocate entry.
- if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
}
}
@@ -244,17 +234,17 @@ void RelocInfo::set_code_age_stub(Code* stub,
}
-Address RelocInfo::call_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Assembler::target_address_at(pc_ + 1, host_);
+Address RelocInfo::debug_call_address() {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
+ return Assembler::target_address_at(location, host_);
}
-void RelocInfo::set_call_address(Address target) {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Assembler::set_target_address_at(pc_ + 1, host_, target);
+void RelocInfo::set_debug_call_address(Address target) {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
+ Assembler::set_target_address_at(location, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -263,23 +253,6 @@ void RelocInfo::set_call_address(Address target) {
}
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 1);
-}
-
-
void RelocInfo::WipeOut() {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
@@ -318,11 +291,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- isolate->debug()->has_break_points()) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
@@ -346,11 +316,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
@@ -502,11 +469,6 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
-Address Assembler::break_address_from_return_address(Address pc) {
- return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
-}
-
-
Displacement Assembler::disp_at(Label* L) {
return Displacement(long_at(L->pos()));
}
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 9066788b1f..2e17fcb007 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -168,9 +168,9 @@ void Displacement::init(Label* L, Type type) {
const int RelocInfo::kApplyMask =
- RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
- 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE;
+ RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
+ 1 << RelocInfo::INTERNAL_REFERENCE | 1 << RelocInfo::CODE_AGE_SEQUENCE |
+ RelocInfo::kDebugBreakSlotMask;
bool RelocInfo::IsCodedSpecially() {
@@ -2325,26 +2325,6 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) {
}
-void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
- DCHECK(IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x38);
- EMIT(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movntdq(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xE7);
- emit_sse_operand(src, dst);
-}
-
-
void Assembler::prefetch(const Operand& src, int level) {
DCHECK(is_uint2(level));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index e77ef28ce7..3daa294aae 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -533,9 +533,6 @@ class Assembler : public AssemblerBase {
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
- // Return the code target address of the patch debug break slot
- inline static Address break_address_from_return_address(Address pc);
-
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -553,21 +550,16 @@ class Assembler : public AssemblerBase {
// Distance between the address of the code target in the call instruction
// and the return address
static const int kCallTargetAddressOffset = kPointerSize;
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32.
-
- // Distance between start of patched debug break slot and the emitted address
- // to jump to.
- static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
static const int kCallInstructionLength = 5;
- static const int kPatchDebugBreakSlotReturnOffset = kPointerSize;
- static const int kJSReturnSequenceLength = 6;
// The debug break slot must be able to contain a call instruction.
static const int kDebugBreakSlotLength = kCallInstructionLength;
+ // Distance between start of patched debug break slot and the emitted address
+ // to jump to.
+ static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
+
// One byte opcode for test al, 0xXX.
static const byte kTestAlByte = 0xA8;
// One byte opcode for nop.
@@ -1085,10 +1077,6 @@ class Assembler : public AssemblerBase {
}
void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
- // Parallel XMM operations.
- void movntdqa(XMMRegister dst, const Operand& src);
- void movntdq(const Operand& dst, XMMRegister src);
-
// AVX instructions
void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmadd132sd(dst, src1, Operand(src2));
@@ -1433,11 +1421,11 @@ class Assembler : public AssemblerBase {
return pc_offset() - label->pos();
}
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
+ // Mark generator continuation.
+ void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot();
+ void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index ef9f30d715..7a055bd876 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/code-factory.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/ia32/frames-ia32.h"
namespace v8 {
namespace internal {
@@ -100,45 +99,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
-static void Generate_Runtime_NewObject(MacroAssembler* masm,
- bool create_memento,
- Register original_constructor,
- Label* count_incremented,
- Label* allocated) {
- int offset = 0;
- if (create_memento) {
- // Get the cell or allocation site.
- __ mov(edi, Operand(esp, kPointerSize * 2));
- __ push(edi);
- offset = kPointerSize;
- }
-
- // Must restore esi (context) and edi (constructor) before calling
- // runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(edi, Operand(esp, offset));
- __ push(edi);
- __ push(original_constructor);
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ CallRuntime(Runtime::kNewObject, 2);
- }
- __ mov(ebx, eax); // store result in ebx
-
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- if (create_memento) {
- __ jmp(count_incremented);
- } else {
- __ jmp(allocated);
- }
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- eax: number of arguments
@@ -154,40 +116,27 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
- if (create_memento) {
- __ AssertUndefinedOrAllocationSite(ebx);
- __ push(ebx);
- }
-
// Preserve the incoming parameters on the stack.
+ __ AssertUndefinedOrAllocationSite(ebx);
+ __ push(ebx);
__ SmiTag(eax);
__ push(eax);
__ push(edi);
- if (use_new_target) {
- __ push(edx);
- }
-
- __ cmp(edx, edi);
- Label normal_new;
- Label count_incremented;
- Label allocated;
- __ j(equal, &normal_new);
-
- // Original constructor and function are different.
- Generate_Runtime_NewObject(masm, create_memento, edx, &count_incremented,
- &allocated);
- __ bind(&normal_new);
+ __ push(edx);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
- Label rt_call;
+ Label rt_call, allocated;
if (FLAG_inline_new) {
- Label undo_allocation;
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
__ j(not_equal, &rt_call);
+ // Fall back to runtime if the original constructor and function differ.
+ __ cmp(edx, edi);
+ __ j(not_equal, &rt_call);
+
// Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
// edi: constructor
@@ -224,12 +173,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ j(not_equal, &allocate);
__ push(eax);
+ __ push(edx);
__ push(edi);
__ push(edi); // constructor
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ pop(edi);
+ __ pop(edx);
__ pop(eax);
__ mov(esi, Map::kSlackTrackingCounterEnd - 1);
@@ -272,8 +223,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ j(less, &no_inobject_slack_tracking);
// Allocate object with a slack.
- __ movzx_b(esi,
- FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ movzx_b(
+ esi,
+ FieldOperand(
+ eax, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
+ __ movzx_b(eax, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+ __ sub(esi, eax);
__ lea(esi,
Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
// esi: offset of first field after pre-allocated fields
@@ -298,7 +253,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ mov(Operand(esi, AllocationMemento::kMapOffset),
factory->allocation_memento_map());
// Get the cell or undefined.
- __ mov(edx, Operand(esp, kPointerSize*2));
+ __ mov(edx, Operand(esp, 3 * kPointerSize));
+ __ AssertUndefinedOrAllocationSite(edx);
__ mov(Operand(esi, AllocationMemento::kAllocationSiteOffset),
edx);
} else {
@@ -306,95 +262,52 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
+ // and jump into the continuation code at any time from now on.
+ // ebx: JSObject (untagged)
__ or_(ebx, Immediate(kHeapObjectTag));
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- // Calculate the total number of properties described by the map.
- __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
- __ movzx_b(ecx,
- FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
- __ add(edx, ecx);
- // Calculate unused properties past the end of the in-object properties.
- __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
- __ sub(edx, ecx);
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, kPropertyAllocationCountFailed);
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // ebx: JSObject
- // edi: start of next object (will be start of FixedArray)
- // edx: number of elements in properties array
- __ Allocate(FixedArray::kHeaderSize,
- times_pointer_size,
- edx,
- REGISTER_VALUE_IS_INT32,
- edi,
- ecx,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // ebx: JSObject
- // edi: FixedArray
- // edx: number of elements
- // ecx: start of next object
- __ mov(eax, factory->fixed_array_map());
- __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
- __ SmiTag(edx);
- __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
-
- // Initialize the fields to undefined.
- // ebx: JSObject
- // edi: FixedArray
- // ecx: start of next object
- __ mov(edx, factory->undefined_value());
- __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
- __ InitializeFieldsWithFiller(eax, ecx, edx);
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // ebx: JSObject
- // edi: FixedArray
- __ or_(edi, Immediate(kHeapObjectTag)); // add the heap tag
- __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
-
-
// Continue with JSObject being successfully allocated
- // ebx: JSObject
+ // ebx: JSObject (tagged)
__ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // ebx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(ebx);
}
// Allocate the new receiver object using the runtime call.
+ // edx: original constructor
__ bind(&rt_call);
- Generate_Runtime_NewObject(masm, create_memento, edi, &count_incremented,
- &allocated);
+ int offset = kPointerSize;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ mov(edi, Operand(esp, kPointerSize * 3));
+ __ push(edi); // argument 1: allocation site
+ offset += kPointerSize;
+ }
+
+ // Must restore esi (context) and edi (constructor) before calling
+ // runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(edi, Operand(esp, offset));
+ __ push(edi); // argument 2/1: constructor function
+ __ push(edx); // argument 3/2: original constructor
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ mov(ebx, eax); // store result in ebx
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// New object allocated.
// ebx: newly allocated object
__ bind(&allocated);
if (create_memento) {
- int offset = (use_new_target ? 3 : 2) * kPointerSize;
- __ mov(ecx, Operand(esp, offset));
+ __ mov(ecx, Operand(esp, 3 * kPointerSize));
__ cmp(ecx, masm->isolate()->factory()->undefined_value());
__ j(equal, &count_incremented);
// ecx is an AllocationSite. We are creating a memento from it, so we
@@ -405,9 +318,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore the parameters.
- if (use_new_target) {
- __ pop(edx); // new.target
- }
+ __ pop(edx); // new.target
__ pop(edi); // Constructor function.
// Retrieve smi-tagged arguments count from the stack.
@@ -416,9 +327,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Push new.target onto the construct frame. This is stored just below the
// receiver on the stack.
- if (use_new_target) {
- __ push(edx);
- }
+ __ push(edx);
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
@@ -452,9 +361,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- // TODO(arv): Remove the "!use_new_target" before supporting optimization
- // of functions that reference new.target
- if (!is_api_function && !use_new_target) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -482,8 +389,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Restore the arguments count and leave the construct frame. The arguments
// count is stored below the reciever and the new.target.
__ bind(&exit);
- int offset = (use_new_target ? 2 : 1) * kPointerSize;
- __ mov(ebx, Operand(esp, offset));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
// Leave construct frame.
}
@@ -499,17 +405,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, true, false);
}
@@ -521,12 +422,13 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// -- edx: original constructor
// -----------------------------------
- // TODO(dslomov): support pretenuring
- CHECK(!FLAG_pretenuring_call_new);
-
{
FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+ // Preserve allocation site.
+ __ AssertUndefinedOrAllocationSite(ebx);
+ __ push(ebx);
+
// Preserve actual arguments count.
__ SmiTag(eax);
__ push(eax);
@@ -705,6 +607,156 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o edi: the JS function object being called
+// o esi: our context
+// o ebp: the caller's frame pointer
+// o esp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-ia32.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS function.
+
+ // Get the bytecode array from the function object and load the pointer to the
+ // first entry into edi (InterpreterBytecodeRegister).
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(kInterpreterBytecodeArrayRegister,
+ FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+ __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+ eax);
+ __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ mov(ecx, esp);
+ __ sub(ecx, ebx);
+ ExternalReference stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ cmp(ecx, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ Label loop_header;
+ Label loop_check;
+ __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
+ __ jmp(&loop_check);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ push(eax);
+ // Continue loop if not done.
+ __ bind(&loop_check);
+ __ sub(ebx, Immediate(kPointerSize));
+ __ j(greater_equal, &loop_header);
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ bind(&ok);
+ }
+
+ // Load accumulator, register file, bytecode offset, dispatch table into
+ // registers.
+ __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ mov(kInterpreterRegisterFileRegister, ebp);
+ __ sub(
+ kInterpreterRegisterFileRegister,
+ Immediate(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ // Since the dispatch table root might be set after builtins are generated,
+ // load directly from the roots table.
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ add(kInterpreterDispatchTableRegister,
+ Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // TODO(rmcilroy) Push our context as a stack located parameter of the
+ // bytecode handler.
+
+ // Dispatch to the first bytecode handler for the function.
+ __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ mov(esi, Operand(kInterpreterDispatchTableRegister, esi,
+ times_pointer_size, 0));
+ // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
+ // and header removal.
+ __ add(esi, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(esi);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // The return value is in accumulator, which is already in rax.
+
+ // Leave the frame (also dropping the register file).
+ __ leave();
+ // Return droping receiver + arguments.
+ // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Ret(1 * kPointerSize, ecx);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -961,8 +1013,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ SmiTag(eax);
__ push(eax);
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(eax, ebx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(ebx, eax);
__ Move(edx, Immediate(0)); // restore
@@ -1056,6 +1109,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int vectorOffset,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
@@ -1071,12 +1125,9 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ mov(receiver, Operand(ebp, argumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
- FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
- Handle<TypeFeedbackVector> feedback_vector =
- masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
- int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
- __ mov(slot, Immediate(Smi::FromInt(index)));
- __ mov(vector, Immediate(feedback_vector));
+ int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
+ __ mov(slot, Immediate(Smi::FromInt(slot_index)));
+ __ mov(vector, Operand(ebp, vectorOffset));
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ call(ic, RelocInfo::CODE_TARGET);
@@ -1124,6 +1175,13 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
static const int kFunctionOffset = kReceiverOffset + kPointerSize;
+ static const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ push(edi);
__ push(Operand(ebp, kFunctionOffset)); // push this
__ push(Operand(ebp, kArgumentsOffset)); // push arguments
@@ -1136,8 +1194,7 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
Generate_CheckStackOverflow(masm, kFunctionOffset, kEaxIsSmiTagged);
// Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
__ push(eax); // limit
__ push(Immediate(0)); // index
@@ -1182,8 +1239,9 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ j(above_equal, &push_receiver);
__ bind(&call_to_object);
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(eax, ebx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(ebx, eax);
__ jmp(&push_receiver);
@@ -1197,8 +1255,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ push(ebx);
// Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
@@ -1247,6 +1305,13 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
static const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
static const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
static const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+ static const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ push(edi);
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
@@ -1266,29 +1331,26 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
Generate_CheckStackOverflow(masm, kFunctionOffset, kEaxIsSmiTagged);
// Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
__ Push(eax); // limit
__ push(Immediate(0)); // index
- // Push newTarget and callee functions
- __ push(Operand(ebp, kNewTargetOffset));
+ // Push the constructor function as callee.
__ push(Operand(ebp, kFunctionOffset));
// Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Use undefined feedback vector
__ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
__ mov(edi, Operand(ebp, kFunctionOffset));
+ __ mov(ecx, Operand(ebp, kNewTargetOffset));
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
// Leave internal frame.
}
// remove this, target, arguments, and newTarget
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 7079dc9f77..53e9e96cdb 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -2,20 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/ia32/frames-ia32.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -37,7 +36,7 @@ static void InitializeArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -56,7 +55,7 @@ static void InitializeInternalArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -688,9 +687,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ push(scratch); // return address
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadElementWithInterceptor), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -789,7 +786,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ pop(ebx); // Return address.
__ push(edx);
__ push(ebx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments, 1, 1);
}
@@ -1691,7 +1688,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
}
// Test for NaN. Compare heap numbers in a general way,
- // to hanlde NaNs correctly.
+ // to handle NaNs correctly.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(isolate()->factory()->heap_number_map()));
__ j(equal, &generic_heap_number_comparison, Label::kNear);
@@ -1704,6 +1701,9 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
__ j(equal, &runtime_call, Label::kFar);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ cmpb(ecx, static_cast<uint8_t>(SIMD128_VALUE_TYPE));
+ __ j(equal, &runtime_call, Label::kFar);
if (is_strong(strength())) {
// We have already tested for smis and heap numbers, so if both
// arguments are not strings we must proceed to the slow case.
@@ -1892,59 +1892,83 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ push(eax);
// Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc == equal) {
- builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == equal && strict()) {
+ __ push(ecx);
+ __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
} else {
- builtin =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- }
+ Builtins::JavaScript builtin;
+ if (cc == equal) {
+ builtin = Builtins::EQUALS;
+ } else {
+ builtin =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ }
- // Restore return address on the stack.
- __ push(ecx);
+ // Restore return address on the stack.
+ __ push(ecx);
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+ }
__ bind(&miss);
GenerateMiss(masm);
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
+ bool is_super) {
// eax : number of arguments to the construct function
- // ebx : Feedback vector
+ // ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // esp[0]: original receiver (for IsSuperConstructorCall)
+ if (is_super) {
+ __ pop(ecx);
+ }
- // Number-of-arguments register must be smi-tagged to call out.
- __ SmiTag(eax);
- __ push(eax);
- __ push(edi);
- __ push(edx);
- __ push(ebx);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallStub(stub);
+ // Number-of-arguments register must be smi-tagged to call out.
+ __ SmiTag(eax);
+ __ push(eax);
+ __ push(edi);
+ __ push(edx);
+ __ push(ebx);
+ if (is_super) {
+ __ push(ecx);
+ }
- __ pop(ebx);
- __ pop(edx);
- __ pop(edi);
- __ pop(eax);
- __ SmiUntag(eax);
+ __ CallStub(stub);
+
+ if (is_super) {
+ __ pop(ecx);
+ }
+ __ pop(ebx);
+ __ pop(edx);
+ __ pop(edi);
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
+
+ if (is_super) {
+ __ push(ecx);
+ }
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// eax : number of arguments to the construct function
- // ebx : Feedback vector
+ // ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
+ // esp[0]: original receiver (for IsSuperConstructorCall)
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
@@ -2013,14 +2037,14 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ jmp(&done);
__ bind(&not_array_function);
}
CreateWeakCellStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ bind(&done);
}
@@ -2073,8 +2097,8 @@ static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{ FrameScope frame_scope(masm, StackFrame::INTERNAL);
__ push(edi);
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ pop(edi);
}
__ mov(Operand(esp, (argc + 1) * kPointerSize), eax);
@@ -2144,11 +2168,15 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
// ebx : feedback vector
- // edx : (only if ebx is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // ecx : original constructor (for IsSuperConstructorCall)
+ // edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
Label slow, non_function_call;
+ if (IsSuperConstructorCall()) {
+ __ push(ecx);
+ }
+
// Check that function is not a smi.
__ JumpIfSmi(edi, &non_function_call);
// Check that function is a JSFunction.
@@ -2156,7 +2184,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ GenerateRecordCallTarget(masm, IsSuperConstructorCall());
if (FLAG_pretenuring_call_new) {
// Put the AllocationSite from the feedback vector into ebx.
@@ -2181,7 +2209,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
if (IsSuperConstructorCall()) {
- __ mov(edx, Operand(esp, eax, times_pointer_size, 2 * kPointerSize));
+ __ pop(edx);
} else {
// Pass original constructor to construct stub.
__ mov(edx, edi);
@@ -2198,6 +2226,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// edi: called object
// eax: number of arguments
// ecx: object map
+ // esp[0]: original receiver (for IsSuperConstructorCall)
Label do_call;
__ bind(&slow);
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
@@ -2208,6 +2237,9 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ bind(&non_function_call);
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
+ if (IsSuperConstructorCall()) {
+ __ Drop(1);
+ }
// Set expected number of arguments to zero (not changing eax).
__ Move(ebx, Immediate(0));
Handle<Code> arguments_adaptor =
@@ -2435,11 +2467,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ push(edx);
// Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
-
- ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
- __ CallExternalReference(miss, 3);
+ Runtime::FunctionId id = GetICState() == DEFAULT
+ ? Runtime::kCallIC_Miss
+ : Runtime::kCallIC_Customization_Miss;
+ __ CallRuntime(id, 3);
// Move result to edi and exit the internal frame.
__ mov(edi, eax);
@@ -3026,10 +3057,9 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
- __ test(code_,
- Immediate(kSmiTagMask |
- ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
+ __ test(code_, Immediate(kSmiTagMask |
+ ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
__ j(not_zero, &slow_case_);
Factory* factory = masm->isolate()->factory();
@@ -3305,7 +3335,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
__ bind(&single_char);
// eax: string
@@ -3528,7 +3558,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -3840,7 +3870,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ bind(&miss);
@@ -3895,15 +3925,13 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
- isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(edx); // Preserve edx and eax.
__ push(eax);
__ push(edx); // And also use them as the arguments.
__ push(eax);
__ push(Immediate(Smi::FromInt(op())));
- __ CallExternalReference(miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss, 3);
// Compute the entry point of the rewritten stub.
__ lea(edi, FieldOperand(eax, Code::kHeaderSize));
__ pop(eax);
@@ -3944,11 +3972,11 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
NameDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
Register entity_name = r0;
// Having undefined at this place means the name is not contained.
- DCHECK_EQ(kSmiTagSize, 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
__ mov(entity_name, Operand(properties, index, times_half_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ cmp(entity_name, masm->isolate()->factory()->undefined_value());
@@ -4016,7 +4044,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ and_(r0, r1);
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
// Check if the key is identical to the name.
@@ -4079,11 +4107,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ and_(scratch, Operand(esp, 0));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ lea(index(), Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
- DCHECK_EQ(kSmiTagSize, 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
__ mov(scratch, Operand(dictionary(), index(), times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ cmp(scratch, isolate()->factory()->undefined_value());
@@ -4581,8 +4609,8 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ push(vector);
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::LOAD_IC, code_flags, false, receiver, name, vector, scratch);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
+ receiver, name, vector, scratch);
__ pop(vector);
__ pop(slot);
@@ -4794,12 +4822,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// esp[4] - last argument
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
- DCHECK(FAST_SMI_ELEMENTS == 0);
- DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
- DCHECK(FAST_ELEMENTS == 2);
- DCHECK(FAST_HOLEY_ELEMENTS == 3);
- DCHECK(FAST_DOUBLE_ELEMENTS == 4);
- DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
__ test_b(edx, 1);
@@ -5092,6 +5120,161 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context_reg = esi;
+ Register slot_reg = ebx;
+ Register result_reg = eax;
+ Label slow_case;
+
+ // Go up context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ mov(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ context_reg = result_reg;
+ }
+
+ // Load the PropertyCell value at the specified slot.
+ __ mov(result_reg, ContextOperand(context_reg, slot_reg));
+ __ mov(result_reg, FieldOperand(result_reg, PropertyCell::kValueOffset));
+
+ // Check that value is not the_hole.
+ __ CompareRoot(result_reg, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &slow_case, Label::kNear);
+ __ Ret();
+
+ // Fallback to the runtime.
+ __ bind(&slow_case);
+ __ SmiTag(slot_reg);
+ __ Pop(result_reg); // Pop return address.
+ __ Push(slot_reg);
+ __ Push(result_reg); // Push return address.
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+}
+
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context_reg = esi;
+ Register slot_reg = ebx;
+ Register value_reg = eax;
+ Register cell_reg = edi;
+ Register cell_details_reg = edx;
+ Register cell_value_reg = ecx;
+ Label fast_heapobject_case, fast_smi_case, slow_case;
+
+ if (FLAG_debug_code) {
+ __ CompareRoot(value_reg, Heap::kTheHoleValueRootIndex);
+ __ Check(not_equal, kUnexpectedValue);
+ }
+
+ // Go up context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ mov(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ context_reg = cell_reg;
+ }
+
+ // Load the PropertyCell at the specified slot.
+ __ mov(cell_reg, ContextOperand(context_reg, slot_reg));
+
+ // Load PropertyDetails for the cell (actually only the cell_type and kind).
+ __ mov(cell_details_reg,
+ FieldOperand(cell_reg, PropertyCell::kDetailsOffset));
+ __ SmiUntag(cell_details_reg);
+ __ and_(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::kMask |
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask));
+
+ // Check if PropertyCell holds mutable data.
+ Label not_mutable_data;
+ __ cmp(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kMutable) |
+ PropertyDetails::KindField::encode(kData)));
+ __ j(not_equal, &not_mutable_data);
+ __ JumpIfSmi(value_reg, &fast_smi_case);
+ __ bind(&fast_heapobject_case);
+ __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
+ __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
+ cell_details_reg, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // RecordWriteField clobbers the value register, so we need to reload.
+ __ mov(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
+ __ Ret();
+ __ bind(&not_mutable_data);
+
+ // Check if PropertyCell value matches the new value (relevant for Constant,
+ // ConstantType and Undefined cells).
+ Label not_same_value;
+ __ mov(cell_value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
+ __ cmp(cell_value_reg, value_reg);
+ __ j(not_equal, &not_same_value,
+ FLAG_debug_code ? Label::kFar : Label::kNear);
+ // Make sure the PropertyCell is not marked READ_ONLY.
+ __ test(cell_details_reg,
+ Immediate(PropertyDetails::kAttributesReadOnlyMask));
+ __ j(not_zero, &slow_case);
+ if (FLAG_debug_code) {
+ Label done;
+ // This can only be true for Constant, ConstantType and Undefined cells,
+ // because we never store the_hole via this stub.
+ __ cmp(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstant) |
+ PropertyDetails::KindField::encode(kData)));
+ __ j(equal, &done);
+ __ cmp(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ j(equal, &done);
+ __ cmp(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kUndefined) |
+ PropertyDetails::KindField::encode(kData)));
+ __ Check(equal, kUnexpectedValue);
+ __ bind(&done);
+ }
+ __ Ret();
+ __ bind(&not_same_value);
+
+ // Check if PropertyCell contains data with constant type (and is not
+ // READ_ONLY).
+ __ cmp(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ j(not_equal, &slow_case, Label::kNear);
+
+ // Now either both old and new values must be SMIs or both must be heap
+ // objects with same map.
+ Label value_is_heap_object;
+ __ JumpIfNotSmi(value_reg, &value_is_heap_object, Label::kNear);
+ __ JumpIfNotSmi(cell_value_reg, &slow_case, Label::kNear);
+ // Old and new values are SMIs, no need for a write barrier here.
+ __ bind(&fast_smi_case);
+ __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
+ __ Ret();
+ __ bind(&value_is_heap_object);
+ __ JumpIfSmi(cell_value_reg, &slow_case, Label::kNear);
+ Register cell_value_map_reg = cell_value_reg;
+ __ mov(cell_value_map_reg,
+ FieldOperand(cell_value_reg, HeapObject::kMapOffset));
+ __ cmp(cell_value_map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
+ __ j(equal, &fast_heapobject_case);
+
+ // Fallback to the runtime.
+ __ bind(&slow_case);
+ __ SmiTag(slot_reg);
+ __ Pop(cell_reg); // Pop return address.
+ __ Push(slot_reg);
+ __ Push(value_reg);
+ __ Push(cell_reg); // Push return address.
+ __ TailCallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2, 1);
+}
+
+
// Generates an Operand for saving parameters after PrepareCallApiFunction.
static Operand ApiParameterOperand(int index) {
return Operand(esp, index * kPointerSize);
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 1fc42e0a8e..ae8cbeb90f 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/codegen.h"
diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/ia32/cpu-ia32.cc
index 0b5c47b548..8de6d1eeb1 100644
--- a/deps/v8/src/ia32/cpu-ia32.cc
+++ b/deps/v8/src/ia32/cpu-ia32.cc
@@ -8,8 +8,6 @@
#include "src/third_party/valgrind/valgrind.h"
#endif
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/assembler.h"
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
deleted file mode 100644
index 5666cf4d22..0000000000
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/codegen.h"
-#include "src/debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void PatchCodeWithCall(Address pc, Address target, int guard_bytes) {
- // Call instruction takes up 5 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 5;
- int code_size = kCallCodeSize + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc, code_size);
-
-// Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->call(target, RelocInfo::NONE32);
-
- // Check that the size of the code generated is as expected.
- DCHECK_EQ(kCallCodeSize,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- DCHECK_GE(guard_bytes, 0);
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-
- CpuFeatures::FlushICache(pc, code_size);
-}
-
-
-// Patch the JS frame exit code with a debug break call. See
-// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-ia32.cc
-// for the precise return instructions sequence.
-void BreakLocation::SetDebugBreakAtReturn() {
- DCHECK(Assembler::kJSReturnSequenceLength >=
- Assembler::kCallInstructionLength);
- PatchCodeWithCall(
- pc(), debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
-}
-
-
-void BreakLocation::SetDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- Isolate* isolate = debug_info_->GetIsolate();
- PatchCodeWithCall(
- pc(), isolate->builtins()->Slot_DebugBreak()->entry(),
- Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs,
- bool convert_call_to_jmp) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
- __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue)));
- }
- __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- DCHECK((object_regs & ~kJSCallerSaved) == 0);
- DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
- DCHECK((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
- }
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ test(reg, Immediate(0xc0000000));
- __ Assert(zero, kUnableToEncodeValueAsSmi);
- }
- __ SmiTag(reg);
- __ push(reg);
- }
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ Move(eax, Immediate(0)); // No arguments.
- __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(masm->isolate(), 1);
- __ CallStub(&ceb);
-
- // Automatically find register that could be used after register restore.
- // We need one register for padding skip instructions.
- Register unused_reg = { -1 };
-
- // Restore the register values containing object pointers from the
- // expression stack.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Move(reg, Immediate(kDebugZapValue));
- }
- bool taken = reg.code() == esi.code();
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
- taken = true;
- }
- if ((non_object_regs & (1 << r)) != 0) {
- __ pop(reg);
- __ SmiUntag(reg);
- taken = true;
- }
- if (!taken) {
- unused_reg = reg;
- }
- }
-
- DCHECK(unused_reg.code() != -1);
-
- // Read current padding counter and skip corresponding number of words.
- __ pop(unused_reg);
- // We divide stored value by 2 (untagging) and multiply it by word's size.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
-
- // Get rid of the internal frame.
- }
-
- // If this call did not replace a call but patched other code then there will
- // be an unwanted return address left on the stack. Here we get rid of that.
- if (convert_call_to_jmp) {
- __ add(esp, Immediate(kPointerSize));
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ jmp(Operand::StaticVariable(after_break_target));
-}
-
-
-void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallICStub
- // ----------- S t a t e -------------
- // -- edx : type feedback slot (smi)
- // -- edi : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, edx.bit() | edi.bit(),
- 0, false);
-}
-
-
-void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // Register state just before return from JS function (from codegen-ia32.cc).
- // ----------- S t a t e -------------
- // -- eax: return value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
-}
-
-
-void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-ia32.cc).
- // ----------- S t a t e -------------
- // -- edi: function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-ia32.cc).
- // eax is the actual number of arguments not encoded as a smi see comment
- // above IC call.
- // ----------- S t a t e -------------
- // -- eax: number of arguments (not smi)
- // -- edi: constructor function
- // -----------------------------------
- // The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
- MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-ia32.cc).
- // eax is the actual number of arguments not encoded as a smi see comment
- // above IC call.
- // ----------- S t a t e -------------
- // -- eax: number of arguments (not smi)
- // -- ebx: feedback array
- // -- edx: feedback slot (smi)
- // -- edi: constructor function
- // -----------------------------------
- // The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
- eax.bit(), false);
-}
-
-
-void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction.
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- __ Nop(Assembler::kDebugBreakSlotLength);
- DCHECK_EQ(Assembler::kDebugBreakSlotLength,
- masm->SizeOfCodeGeneratedSince(&check_codesize));
-}
-
-
-void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
- Generate_DebugBreakCallHelper(masm, 0, 0, true);
-}
-
-
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
-
- // We do not know our frame height, but set esp based on ebp.
- __ lea(esp, Operand(ebp, -1 * kPointerSize));
-
- __ pop(edi); // Function.
- __ pop(ebp);
-
- // Load context from the function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Get function code.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
-
- // Re-run JSFunction, edi is function, esi is context.
- __ jmp(edx);
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 46985793b8..d804f630ea 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/ia32/frames-ia32.h"
#include "src/safepoint-table.h"
namespace v8 {
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 05cb1d5e25..935b22d900 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -6,8 +6,6 @@
#include <stdarg.h>
#include <stdio.h>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/disasm.h"
@@ -1618,11 +1616,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
} else if (*data == 0x2A) {
// movntdqa
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
+ UnimplementedInstruction();
} else {
UnimplementedInstruction();
}
@@ -1827,9 +1821,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
if (mod == 3) {
- AppendToBuffer("movntdq ");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ // movntdq
+ UnimplementedInstruction();
} else {
UnimplementedInstruction();
}
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index a9c47274d6..576a1c142a 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/assembler.h"
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index fddace732e..1d200c04b0 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -79,12 +79,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
} } // namespace v8::internal
#endif // V8_IA32_FRAMES_IA32_H_
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 4eefa94510..4724d1eb44 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/interface-descriptors.h"
@@ -32,12 +30,16 @@ const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return edi; }
const Register VectorStoreICDescriptor::VectorRegister() { return ebx; }
-const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
+const Register StoreTransitionDescriptor::MapRegister() {
+ return FLAG_vector_stores ? no_reg : ebx;
+}
-const Register ElementTransitionAndStoreDescriptor::MapRegister() {
- return ebx;
-}
+const Register LoadGlobalViaContextDescriptor::SlotRegister() { return ebx; }
+
+
+const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
+const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
const Register InstanceofDescriptor::left() { return eax; }
@@ -63,6 +65,20 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
+void StoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister()};
+
+ // When FLAG_vector_stores is true, we want to pass the map register on the
+ // stack instead of in a register.
+ DCHECK(FLAG_vector_stores || !MapRegister().is(no_reg));
+
+ int register_count = FLAG_vector_stores ? 3 : 4;
+ data->InitializePlatformSpecific(register_count, registers);
+}
+
+
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx};
@@ -85,6 +101,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
}
+// static
+const Register ToObjectDescriptor::ReceiverRegister() { return eax; }
+
+
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax};
@@ -159,12 +179,12 @@ void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
// ebx : feedback vector
- // edx : (only if ebx is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // ecx : original constructor (for IsSuperConstructorCall)
+ // edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {eax, edi, ebx};
+ Register registers[] = {eax, edi, ecx, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
@@ -332,11 +352,22 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
+void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ edi, // math rounding function
+ edx, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void MathRoundVariantCallFromOptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
edi, // math rounding function
edx, // vector slot id
+ ebx // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 24076ecd06..71ad8387a0 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/base/bits.h"
@@ -13,6 +11,7 @@
#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/hydrogen-osr.h"
+#include "src/ia32/frames-ia32.h"
#include "src/ia32/lithium-codegen-ia32.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -133,7 +132,7 @@ bool LCodeGen::GeneratePrologue() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
@@ -541,6 +540,10 @@ int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
const Representation& r) const {
HConstant* constant = chunk_->LookupConstant(op);
+ if (r.IsExternal()) {
+ return reinterpret_cast<int32_t>(
+ constant->ExternalReferenceValue().address());
+ }
int32_t value = constant->Integer32Value();
if (r.IsInteger32()) return value;
DCHECK(r.IsSmiOrTagged());
@@ -670,15 +673,23 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
if (op->IsStackSlot()) {
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
if (is_tagged) {
- translation->StoreStackSlot(op->index());
+ translation->StoreStackSlot(index);
} else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
+ translation->StoreUint32StackSlot(index);
} else {
- translation->StoreInt32StackSlot(op->index());
+ translation->StoreInt32StackSlot(index);
}
} else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
+ translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -2164,6 +2175,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(equal, instr->TrueLabel(chunk_));
}
+ if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ // SIMD value -> true.
+ __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
+ __ j(equal, instr->TrueLabel(chunk_));
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
@@ -2849,13 +2866,31 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
- PREMONOMORPHIC).code();
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+ SLOPPY, PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(esi));
+ DCHECK(ToRegister(instr->result()).is(eax));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
+ Handle<Code> stub =
+ CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2968,7 +3003,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3040,40 +3075,31 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
instr->hydrogen()->key()->representation(),
elements_kind,
instr->base_offset()));
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
__ cvtss2sd(result, result);
- } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ } else if (elements_kind == FLOAT64_ELEMENTS) {
__ movsd(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
- case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
__ movsx_b(result, operand);
break;
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ movzx_b(result, operand);
break;
- case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
__ movsx_w(result, operand);
break;
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
__ movzx_w(result, operand);
break;
- case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
__ mov(result, operand);
break;
- case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
@@ -3081,8 +3107,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
}
break;
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -3162,7 +3186,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3394,10 +3418,9 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- __ push(esi); // The context is the first argument.
__ push(Immediate(instr->hydrogen()->pairs()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}
@@ -4065,7 +4088,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (operand_value->IsRegister()) {
Register value = ToRegister(operand_value);
__ Store(value, operand, representation);
- } else if (representation.IsInteger32()) {
+ } else if (representation.IsInteger32() || representation.IsExternal()) {
Immediate immediate = ToImmediate(operand_value, representation);
DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
__ mov(operand, immediate);
@@ -4112,6 +4135,30 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(esi));
+ DCHECK(ToRegister(instr->value())
+ .is(StoreGlobalViaContextDescriptor::ValueRegister()));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
+ Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
+ isolate(), depth, instr->language_mode())
+ .code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
+ __ CallRuntime(is_strict(instr->language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
if (instr->index()->IsConstantOperand()) {
@@ -4151,39 +4198,28 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
instr->hydrogen()->key()->representation(),
elements_kind,
instr->base_offset()));
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
XMMRegister xmm_scratch = double_scratch0();
__ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
__ movss(operand, xmm_scratch);
- } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ } else if (elements_kind == FLOAT64_ELEMENTS) {
__ movsd(operand, ToDoubleRegister(instr->value()));
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
- case EXTERNAL_INT8_ELEMENTS:
case UINT8_ELEMENTS:
case INT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ mov_b(operand, value);
break;
- case EXTERNAL_INT16_ELEMENTS:
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
case INT16_ELEMENTS:
__ mov_w(operand, value);
break;
- case EXTERNAL_INT32_ELEMENTS:
- case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
case INT32_ELEMENTS:
__ mov(operand, value);
break;
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -4269,7 +4305,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases...external, fast-double, fast
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -5463,10 +5499,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
} else if (String::Equals(type_name, factory()->string_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label, false_distance);
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- final_branch_condition = zero;
+ final_branch_condition = below;
} else if (String::Equals(type_name, factory()->symbol_string())) {
__ JumpIfSmi(input, false_label, false_distance);
@@ -5510,6 +5543,17 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
1 << Map::kIsUndetectable);
final_branch_condition = zero;
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(type_name, factory()->type##_string())) { \
+ __ JumpIfSmi(input, false_label, false_distance); \
+ __ cmp(FieldOperand(input, HeapObject::kMapOffset), \
+ factory()->type##_map()); \
+ final_branch_condition = equal;
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+
} else {
__ jmp(false_label, false_distance);
}
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index 099e1f8ad4..0926a0f21a 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/ia32/lithium-codegen-ia32.h"
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h b/deps/v8/src/ia32/lithium-gap-resolver-ia32.h
index fc87a8e259..d36e78b5f6 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.h
@@ -5,8 +5,6 @@
#ifndef V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
#define V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-#include "src/v8.h"
-
#include "src/lithium.h"
namespace v8 {
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 64677de83d..b7a85cb228 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -4,8 +4,6 @@
#include <sstream>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/hydrogen-osr.h"
@@ -379,6 +377,11 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
}
+void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d", depth(), slot_index());
+}
+
+
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -397,6 +400,12 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
+ value()->PrintTo(stream);
+}
+
+
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -1615,8 +1624,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else if (instr->representation().IsExternal()) {
- DCHECK(instr->left()->representation().IsExternal());
- DCHECK(instr->right()->representation().IsInteger32());
+ DCHECK(instr->IsConsistentExternalRepresentation());
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
bool use_lea = LAddI::UseLea(instr);
LOperand* left = UseRegisterAtStart(instr->left());
@@ -2144,6 +2152,15 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
+ HLoadGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ DCHECK(instr->slot_index() > 0);
+ LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2220,7 +2237,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
: UseRegisterOrConstantAtStart(instr->key());
LInstruction* result = NULL;
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
@@ -2234,10 +2251,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
bool needs_environment;
- if (instr->is_external() || instr->is_fixed_typed_array()) {
+ if (instr->is_fixed_typed_array()) {
// see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
- elements_kind == UINT32_ELEMENTS) &&
+ needs_environment = elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32);
} else {
// see LCodeGen::DoLoadKeyedFixedDoubleArray and
@@ -2274,9 +2290,6 @@ LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
// Determine if we need a byte register in this case for the value.
bool val_is_fixed_register =
- elements_kind == EXTERNAL_INT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
elements_kind == UINT8_ELEMENTS ||
elements_kind == INT8_ELEMENTS ||
elements_kind == UINT8_CLAMPED_ELEMENTS;
@@ -2289,7 +2302,7 @@ LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
DCHECK(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsSmi());
@@ -2324,10 +2337,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
!IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(elements_kind)));
- DCHECK((instr->is_fixed_typed_array() &&
- instr->elements()->representation().IsTagged()) ||
- (instr->is_external() &&
- instr->elements()->representation().IsExternal()));
+ DCHECK(instr->elements()->representation().IsExternal());
LOperand* backing_store = UseRegister(instr->elements());
LOperand* val = GetStoreKeyedValueOperand(instr);
@@ -2483,6 +2493,19 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
+ HStoreGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* value = UseFixed(instr->value(),
+ StoreGlobalViaContextDescriptor::ValueRegister());
+ DCHECK(instr->slot_index() > 0);
+
+ LStoreGlobalViaContext* result =
+ new (zone()) LStoreGlobalViaContext(context, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 6a123d6ace..9224dcba98 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -105,6 +105,7 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
+ V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -145,6 +146,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
+ V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1639,15 +1641,9 @@ class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- bool is_external() const {
- return hydrogen()->is_external();
- }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1667,12 +1663,8 @@ inline static bool ExternalArrayOpRequiresTemp(
// an index cannot fold the scale operation into a load and need an extra
// temp register to do the work.
return key_representation.IsSmi() &&
- (elements_kind == EXTERNAL_INT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
- elements_kind == UINT8_ELEMENTS ||
- elements_kind == INT8_ELEMENTS ||
- elements_kind == UINT8_CLAMPED_ELEMENTS);
+ (elements_kind == UINT8_ELEMENTS || elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS);
}
@@ -1713,7 +1705,23 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
+ TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ LOperand* context() { return inputs_[0]; }
+
+ int depth() const { return hydrogen()->depth(); }
+ int slot_index() const { return hydrogen()->slot_index(); }
};
@@ -2209,6 +2217,28 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
+class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreGlobalViaContext(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
+ "store-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int depth() { return hydrogen()->depth(); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
@@ -2217,13 +2247,9 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
inputs_[2] = val;
}
- bool is_external() const { return hydrogen()->is_external(); }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 6e43c485fc..2ad52208e4 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/base/bits.h"
@@ -11,7 +9,8 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
+#include "src/ia32/frames-ia32.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -572,9 +571,10 @@ void MacroAssembler::RecordWrite(
void MacroAssembler::DebugBreak() {
Move(eax, Immediate(0));
- mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
+ mov(ebx, Immediate(ExternalReference(Runtime::kHandleDebuggerStatement,
+ isolate())));
CEntryStub ces(isolate(), 1);
- call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
@@ -1494,20 +1494,6 @@ void MacroAssembler::Allocate(Register object_size,
}
-void MacroAssembler::UndoAllocationInNewSpace(Register object) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
-#ifdef DEBUG
- cmp(object, Operand::StaticVariable(new_space_allocation_top));
- Check(below, kUndoAllocationOfNonAllocatedMemory);
-#endif
- mov(Operand::StaticVariable(new_space_allocation_top), object);
-}
-
-
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
@@ -3192,14 +3178,22 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
DCHECK(!scratch1.is(scratch0));
Factory* factory = isolate()->factory();
Register current = scratch0;
- Label loop_again;
+ Label loop_again, end;
// scratch contained elements pointer.
mov(current, object);
+ mov(current, FieldOperand(current, HeapObject::kMapOffset));
+ mov(current, FieldOperand(current, Map::kPrototypeOffset));
+ cmp(current, Immediate(factory->null_value()));
+ j(equal, &end);
// Loop based on the map going up the prototype chain.
bind(&loop_again);
mov(current, FieldOperand(current, HeapObject::kMapOffset));
+ STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ CmpInstanceType(current, JS_OBJECT_TYPE);
+ j(below, found);
mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
@@ -3207,6 +3201,8 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
mov(current, FieldOperand(current, Map::kPrototypeOffset));
cmp(current, Immediate(factory->null_value()));
j(not_equal, &loop_again);
+
+ bind(&end);
}
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 69b0c5f5a8..b228ef9a28 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -13,6 +13,19 @@
namespace v8 {
namespace internal {
+// Give alias names to registers for calling conventions.
+const Register kReturnRegister0 = {kRegister_eax_Code};
+const Register kReturnRegister1 = {kRegister_edx_Code};
+const Register kJSFunctionRegister = {kRegister_edi_Code};
+const Register kContextRegister = {kRegister_esi_Code};
+const Register kInterpreterAccumulatorRegister = {kRegister_eax_Code};
+const Register kInterpreterRegisterFileRegister = {kRegister_edx_Code};
+const Register kInterpreterBytecodeOffsetRegister = {kRegister_ecx_Code};
+const Register kInterpreterBytecodeArrayRegister = {kRegister_edi_Code};
+const Register kInterpreterDispatchTableRegister = {kRegister_ebx_Code};
+const Register kRuntimeCallFunctionRegister = {kRegister_ebx_Code};
+const Register kRuntimeCallArgCountRegister = {kRegister_eax_Code};
+
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
@@ -635,12 +648,6 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. Make sure that no pointers are left to the
- // object(s) no longer allocated as they would be invalid when allocation is
- // un-done.
- void UndoAllocationInNewSpace(Register object);
-
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or
@@ -1031,7 +1038,7 @@ class MacroAssembler: public Assembler {
class CodePatcher {
public:
CodePatcher(byte* address, int size);
- virtual ~CodePatcher();
+ ~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
@@ -1074,6 +1081,11 @@ inline Operand ContextOperand(Register context, int index) {
}
+inline Operand ContextOperand(Register context, Register index) {
+ return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
+}
+
+
inline Operand GlobalObjectOperand() {
return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
}
diff --git a/deps/v8/src/ic/OWNERS b/deps/v8/src/ic/OWNERS
index 81e8577125..3581afece3 100644
--- a/deps/v8/src/ic/OWNERS
+++ b/deps/v8/src/ic/OWNERS
@@ -1 +1,7 @@
+set noparent
+
+bmeurer@chromium.org
+ishell@chromium.org
+jkummerow@chromium.org
+mvstanton@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc
index 6fffeda08b..0dc9ab6e8d 100644
--- a/deps/v8/src/ic/access-compiler.cc
+++ b/deps/v8/src/ic/access-compiler.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/ic/access-compiler.h"
diff --git a/deps/v8/src/ic/arm/access-compiler-arm.cc b/deps/v8/src/ic/arm/access-compiler-arm.cc
index 73ef09663e..3b0c0c26c7 100644
--- a/deps/v8/src/ic/arm/access-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/access-compiler-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/ic/access-compiler.h"
@@ -33,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(r3.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ DCHECK(r3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, r3, r4, r5};
return registers;
}
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 6af65e2cf2..6f4ddcf98a 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/ic/call-optimization.h"
@@ -223,10 +221,9 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, IC::UtilityId id) {
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
}
@@ -315,9 +312,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
}
@@ -328,9 +323,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
}
@@ -352,11 +345,17 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
+void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
+ Register scratch) {
+ DCHECK(false); // Not implemented.
+}
+
+
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register map_reg,
Register scratch,
Label* miss) {
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- Register map_reg = StoreTransitionDescriptor::MapRegister();
DCHECK(!map_reg.is(scratch));
__ LoadWeakValue(map_reg, cell, miss);
if (transition->CanBeDeprecated()) {
@@ -664,7 +663,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
// of this method.)
CompileCallLoadPropertyWithInterceptor(
masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
+ Runtime::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
@@ -696,10 +695,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
+ NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -724,9 +721,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(ip, value());
// Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -738,9 +733,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index a3b74ce9f3..a805f4ccee 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/codegen.h"
@@ -313,9 +311,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
}
@@ -344,10 +341,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
}
@@ -434,16 +429,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, r4, r5, r6, r9));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_load_dummy_vector());
- int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ mov(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::KEYED_LOAD_IC, flags, false, receiver, key, r4, r5, r6, r9);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
+ receiver, key, r4, r5, r6, r9);
// Cache miss.
GenerateMiss(masm);
@@ -467,14 +463,24 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
}
@@ -658,9 +664,12 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
__ cmp(r4, Operand(JS_ARRAY_TYPE));
__ b(eq, &array);
- // Check that the object is some kind of JSObject.
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(lt, &slow);
+ // Check that the object is some kind of JS object EXCEPT JS Value type. In
+ // the case that the object is a value-wrapper object, we enter the runtime
+ // system to make sure that indexing into string objects works as intended.
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ __ cmp(r4, Operand(JS_OBJECT_TYPE));
+ __ b(lo, &slow);
// Object case: Check key against length in the elements array.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -689,17 +698,18 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, r3, r4, r5, r6));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_store_dummy_vector());
- int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedStoreDummyVectorRootIndex);
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ mov(slot, Operand(Smi::FromInt(slot_index)));
}
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, receiver, key, r3, r4, r5, r6);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, key, r3, r4, r5, r6);
// Cache miss.
__ b(&miss);
@@ -760,8 +770,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, receiver, name, r3, r4, r5, r6);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, name, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -769,13 +779,11 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMiss(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
}
diff --git a/deps/v8/src/ic/arm/ic-compiler-arm.cc b/deps/v8/src/ic/arm/ic-compiler-arm.cc
index e42f2f7898..ff2bcf05b1 100644
--- a/deps/v8/src/ic/arm/ic-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/ic-compiler-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/ic/ic.h"
@@ -65,7 +63,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
// Polymorphic keyed stores may use the map register
Register map_reg = scratch1();
DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ map_reg.is(StoreTransitionDescriptor::MapRegister()));
int receiver_count = maps->length();
int number_of_handled_maps = 0;
diff --git a/deps/v8/src/ic/arm/stub-cache-arm.cc b/deps/v8/src/ic/arm/stub-cache-arm.cc
index aa247d230f..cdd04faf38 100644
--- a/deps/v8/src/ic/arm/stub-cache-arm.cc
+++ b/deps/v8/src/ic/arm/stub-cache-arm.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
#include "src/codegen.h"
@@ -18,7 +16,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
@@ -86,8 +84,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif
- if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
-
// Jump to the first instruction in the code stub.
__ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -97,10 +93,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, bool leave_frame,
- Register receiver, Register name,
- Register scratch, Register extra, Register extra2,
- Register extra3) {
+ Code::Flags flags, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
@@ -153,8 +148,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ and_(scratch, scratch, Operand(mask));
// Probe the primary table.
- ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
- name, scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
+ extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
__ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
@@ -163,8 +158,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ and_(scratch, scratch, Operand(mask2));
// Probe the secondary table.
- ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
- name, scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
+ extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/ic/arm64/access-compiler-arm64.cc b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
index e77476f0a8..14b0fa7f16 100644
--- a/deps/v8/src/ic/arm64/access-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/ic/access-compiler.h"
@@ -40,7 +38,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, value, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(x3.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ DCHECK(x3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, x3, x4, x5};
return registers;
}
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 3986c0ed66..71c70da7a4 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/ic/call-optimization.h"
@@ -132,11 +130,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, IC::UtilityId id) {
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
}
@@ -308,9 +305,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
}
@@ -323,9 +318,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
}
@@ -375,9 +368,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -403,11 +394,17 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
+void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
+ Register scratch) {
+ DCHECK(false); // Not implemented.
+}
+
+
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register map_reg,
Register scratch,
Label* miss) {
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- Register map_reg = StoreTransitionDescriptor::MapRegister();
DCHECK(!map_reg.is(scratch));
__ LoadWeakValue(map_reg, cell, miss);
if (transition->CanBeDeprecated()) {
@@ -728,7 +725,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
// of this method.)
CompileCallLoadPropertyWithInterceptor(
masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
+ Runtime::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
@@ -759,10 +756,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
+ NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -790,9 +785,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
// Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index 13dd3913ae..27c4f71431 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/codegen.h"
@@ -296,9 +294,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadWithVectorDescriptor::NameRegister(),
LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister());
- ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
}
@@ -328,10 +325,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadWithVectorDescriptor::VectorRegister());
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
}
@@ -412,16 +407,17 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch1, scratch2, scratch3, scratch4));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_load_dummy_vector());
- int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ Mov(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
- false, receiver, key, scratch1,
+ receiver, key, scratch1,
scratch2, scratch3, scratch4);
// Cache miss.
KeyedLoadIC::GenerateMiss(masm);
@@ -474,16 +470,25 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
ASM_LOCATION("KeyedStoreIC::GenerateMiss");
+ StoreIC_PushArgs(masm);
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
}
@@ -657,9 +662,12 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Register instance_type = x10;
__ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
__ B(eq, &array);
- // Check that the object is some kind of JSObject.
- __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
- __ B(lt, &slow);
+ // Check that the object is some kind of JS object EXCEPT JS Value type. In
+ // the case that the object is a value-wrapper object, we enter the runtime
+ // system to make sure that indexing into string objects works as intended.
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ __ Cmp(instance_type, JS_OBJECT_TYPE);
+ __ B(lo, &slow);
// Object case: Check key against length in the elements array.
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -689,17 +697,18 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, x3, x4, x5, x6));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_store_dummy_vector());
- int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedStoreDummyVectorRootIndex);
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ Mov(slot, Operand(Smi::FromInt(slot_index)));
}
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, receiver, key, x3, x4, x5, x6);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, key, x3, x4, x5, x6);
// Cache miss.
__ B(&miss);
@@ -758,8 +767,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, receiver, name, x3, x4, x5, x6);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, name, x3, x4, x5, x6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -767,13 +776,11 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMiss(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// Tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
}
diff --git a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
index f9eab7d9d2..a86b5e53b5 100644
--- a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/ic/ic.h"
@@ -65,7 +63,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
// Polymorphic keyed stores may use the map register
Register map_reg = scratch1();
DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ map_reg.is(StoreTransitionDescriptor::MapRegister()));
__ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = maps->length();
int number_of_handled_maps = 0;
diff --git a/deps/v8/src/ic/arm64/stub-cache-arm64.cc b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
index ba5cbddb64..ecd7fe1534 100644
--- a/deps/v8/src/ic/arm64/stub-cache-arm64.cc
+++ b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#include "src/codegen.h"
@@ -25,7 +23,7 @@ namespace internal {
//
// 'receiver', 'name' and 'offset' registers are preserved on miss.
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags,
StubCache::Table table, Register receiver, Register name,
Register offset, Register scratch, Register scratch2,
Register scratch3) {
@@ -81,8 +79,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif
- if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
-
// Jump to the first instruction in the code stub.
__ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
__ Br(scratch);
@@ -93,10 +89,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, bool leave_frame,
- Register receiver, Register name,
- Register scratch, Register extra, Register extra2,
- Register extra3) {
+ Code::Flags flags, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
@@ -139,8 +134,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
CountTrailingZeros(kPrimaryTableSize, 64));
// Probe the primary table.
- ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
- name, scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
+ extra, extra2, extra3);
// Primary miss: Compute hash for secondary table.
__ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
@@ -148,8 +143,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ And(scratch, scratch, kSecondaryTableSize - 1);
// Probe the secondary table.
- ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
- name, scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
+ extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 31f5437228..45717b50a7 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/ic/call-optimization.h"
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 26d195326f..98b30aa54d 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/ic/handler-compiler.h"
#include "src/cpu-profiler.h"
#include "src/ic/call-optimization.h"
-#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/ic-inl.h"
@@ -99,21 +98,11 @@ Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg,
Handle<Name> name,
Label* miss,
ReturnHolder return_what) {
- PrototypeCheckType check_type = CHECK_ALL_MAPS;
- int function_index = -1;
- if (map()->instance_type() < FIRST_NONSTRING_TYPE) {
- function_index = Context::STRING_FUNCTION_INDEX;
- } else if (map()->instance_type() == SYMBOL_TYPE) {
- function_index = Context::SYMBOL_FUNCTION_INDEX;
- } else if (map()->instance_type() == HEAP_NUMBER_TYPE) {
- function_index = Context::NUMBER_FUNCTION_INDEX;
- } else if (*map() == isolate()->heap()->boolean_map()) {
- function_index = Context::BOOLEAN_FUNCTION_INDEX;
- } else {
- check_type = SKIP_RECEIVER;
- }
-
- if (check_type == CHECK_ALL_MAPS) {
+ PrototypeCheckType check_type = SKIP_RECEIVER;
+ int function_index = map()->IsPrimitiveMap()
+ ? map()->GetConstructorFunctionIndex()
+ : Map::kNoConstructorFunctionIndex;
+ if (function_index != Map::kNoConstructorFunctionIndex) {
GenerateDirectLoadGlobalFunctionPrototype(masm(), function_index,
scratch1(), miss);
Object* function = isolate()->native_context()->get(function_index);
@@ -121,6 +110,7 @@ Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg,
Handle<Map> map(JSObject::cast(prototype)->map());
set_map(map);
object_reg = scratch1();
+ check_type = CHECK_ALL_MAPS;
}
// Check that the maps starting from the prototype haven't changed.
@@ -463,11 +453,17 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
DCHECK(!transition->is_access_check_needed());
// Call to respective StoreTransitionStub.
+ Register transition_map_reg = StoreTransitionDescriptor::MapRegister();
+ bool push_map_on_stack = transition_map_reg.is(no_reg);
+ Register map_reg = push_map_on_stack ? scratch1() : transition_map_reg;
+
if (details.type() == DATA_CONSTANT) {
- GenerateRestoreMap(transition, scratch2(), &miss);
DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
- Register map_reg = StoreTransitionDescriptor::MapRegister();
+ GenerateRestoreMap(transition, map_reg, scratch2(), &miss);
GenerateConstantCheck(map_reg, descriptor, value(), scratch2(), &miss);
+ if (push_map_on_stack) {
+ GeneratePushMap(map_reg, scratch2());
+ }
GenerateRestoreName(name);
StoreTransitionStub stub(isolate());
GenerateTailCall(masm(), stub.GetCode());
@@ -482,7 +478,10 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
? StoreTransitionStub::ExtendStorageAndStoreMapAndValue
: StoreTransitionStub::StoreMapAndValue;
- GenerateRestoreMap(transition, scratch2(), &miss);
+ GenerateRestoreMap(transition, map_reg, scratch2(), &miss);
+ if (push_map_on_stack) {
+ GeneratePushMap(map_reg, scratch2());
+ }
GenerateRestoreName(name);
StoreTransitionStub stub(isolate(),
FieldIndex::ForDescriptor(*transition, descriptor),
@@ -566,7 +565,6 @@ void ElementHandlerCompiler::CompileElementHandlers(
} else if (IsSloppyArgumentsElements(elements_kind)) {
cached_stub = KeyedLoadSloppyArgumentsStub(isolate()).GetCode();
} else if (IsFastElementsKind(elements_kind) ||
- IsExternalArrayElementsKind(elements_kind) ||
IsFixedTypedArrayElementsKind(elements_kind)) {
cached_stub = LoadFastElementStub(isolate(), is_js_array, elements_kind,
convert_hole_to_undefined).GetCode();
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 99bf5e6a7a..05c973a625 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -251,11 +251,12 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
virtual void FrontendFooter(Handle<Name> name, Label* miss);
void GenerateRestoreName(Label* label, Handle<Name> name);
+ void GeneratePushMap(Register map_reg, Register scratch);
private:
void GenerateRestoreName(Handle<Name> name);
- void GenerateRestoreMap(Handle<Map> transition, Register scratch,
- Label* miss);
+ void GenerateRestoreMap(Handle<Map> transition, Register map_reg,
+ Register scratch, Label* miss);
void GenerateConstantCheck(Register map_reg, int descriptor,
Register value_reg, Register scratch,
diff --git a/deps/v8/src/ic/ia32/access-compiler-ia32.cc b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
index 422a0be5f0..81579e5dc3 100644
--- a/deps/v8/src/ic/ia32/access-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/ic/access-compiler.h"
@@ -32,7 +30,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(ebx.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ DCHECK(ebx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, ebx, edi, no_reg};
return registers;
}
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 8f5200aee6..c45821fe8b 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/ic/call-optimization.h"
@@ -294,10 +292,9 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, IC::UtilityId id) {
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
}
@@ -321,8 +318,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
}
@@ -331,8 +327,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
}
@@ -354,11 +349,20 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
+void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
+ Register scratch) {
+ // Get the return address, push the argument and then continue.
+ __ pop(scratch);
+ __ push(map_reg);
+ __ push(scratch);
+}
+
+
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register map_reg,
Register scratch,
Label* miss) {
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- Register map_reg = StoreTransitionDescriptor::MapRegister();
DCHECK(!map_reg.is(scratch));
__ LoadWeakValue(map_reg, cell, miss);
if (transition->CanBeDeprecated()) {
@@ -669,7 +673,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
// of this method.)
CompileCallLoadPropertyWithInterceptor(
masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
+ Runtime::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
@@ -710,10 +714,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
holder());
__ push(scratch2()); // restore old return address
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
+ NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -738,9 +740,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -756,9 +756,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
index abeacc86d4..a1e2cbcefe 100644
--- a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/ic/ic.h"
@@ -69,7 +67,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
// Polymorphic keyed stores may use the map register
Register map_reg = scratch1();
DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ map_reg.is(StoreTransitionDescriptor::MapRegister()));
__ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = maps->length();
int number_of_handled_maps = 0;
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index d59e58521a..d683264e13 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/codegen.h"
@@ -157,10 +155,9 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
(1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
__ j(not_zero, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing
- // into string objects works as intended.
+ // Check that the object is some kind of JS object EXCEPT JS Value type. In
+ // the case that the object is a value-wrapper object, we enter the runtime
+ // system to make sure that indexing into string objects works as intended.
DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpInstanceType(map, JS_OBJECT_TYPE);
@@ -341,16 +338,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- isolate->factory()->keyed_load_dummy_vector());
- int slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(isolate);
+ int slot = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
- false, receiver, key, ebx, edi);
+ receiver, key, ebx, edi);
__ pop(LoadWithVectorDescriptor::VectorRegister());
__ pop(LoadDescriptor::SlotRegister());
@@ -539,8 +537,11 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotSmi(key, &maybe_name_key);
__ CmpInstanceType(edi, JS_ARRAY_TYPE);
__ j(equal, &array);
- // Check that the object is some kind of JSObject.
- __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+ // Check that the object is some kind of JS object EXCEPT JS Value type. In
+ // the case that the object is a value-wrapper object, we enter the runtime
+ // system to make sure that indexing into string objects works as intended.
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ __ CmpInstanceType(edi, JS_OBJECT_TYPE);
__ j(below, &slow);
// Object case: Check key against length in the elements array.
@@ -565,17 +566,18 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
if (FLAG_vector_stores) {
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_store_dummy_vector());
- int slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
}
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, receiver, key, ebx, no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, key, ebx, no_reg);
if (FLAG_vector_stores) {
__ pop(VectorStoreICDescriptor::VectorRegister());
@@ -676,10 +678,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
}
@@ -709,10 +709,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
}
@@ -740,7 +738,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
+ masm, Code::STORE_IC, flags, StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), ebx, no_reg);
// Cache miss: Jump to runtime.
@@ -753,13 +751,24 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
-
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
+ if (FLAG_vector_stores) {
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // Contains the return address.
+ } else {
+ DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(value);
+ __ push(ebx);
+ }
}
@@ -768,9 +777,8 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
}
@@ -806,9 +814,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
}
diff --git a/deps/v8/src/ic/ia32/stub-cache-ia32.cc b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
index aa807a77a6..68b30e7bdb 100644
--- a/deps/v8/src/ic/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/codegen.h"
@@ -18,7 +16,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags,
StubCache::Table table, Register name, Register receiver,
// Number of the cache entry pointer-size scaled.
Register offset, Register extra) {
@@ -65,8 +63,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ pop(LoadDescriptor::SlotRegister());
}
- if (leave_frame) __ leave();
-
// Jump to the first instruction in the code stub.
__ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(extra);
@@ -120,8 +116,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ pop(slot);
}
- if (leave_frame) __ leave();
-
// Jump to the first instruction in the code stub.
__ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(offset);
@@ -134,10 +128,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, bool leave_frame,
- Register receiver, Register name,
- Register scratch, Register extra, Register extra2,
- Register extra3) {
+ Code::Flags flags, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
Label miss;
// Assert that code is valid. The multiplying code relies on the entry size
@@ -180,8 +173,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
DCHECK(kCacheIndexShift == kPointerSizeLog2);
// Probe the primary table.
- ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kPrimary, name,
- receiver, offset, extra);
+ ProbeTable(isolate(), masm, ic_kind, flags, kPrimary, name, receiver, offset,
+ extra);
// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
@@ -193,8 +186,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
// Probe the secondary table.
- ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kSecondary, name,
- receiver, offset, extra);
+ ProbeTable(isolate(), masm, ic_kind, flags, kSecondary, name, receiver,
+ offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index a5ae6cfff4..d7b95dada2 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/ic/ic-compiler.h"
#include "src/cpu-profiler.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic-inl.h"
-#include "src/ic/ic-compiler.h"
namespace v8 {
@@ -109,7 +108,6 @@ Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
} else if (receiver_map->has_sloppy_arguments_elements()) {
stub = KeyedLoadSloppyArgumentsStub(isolate).GetCode();
} else if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements() ||
receiver_map->has_fixed_typed_array_elements()) {
stub = LoadFastElementStub(isolate, is_js_array, elements_kind,
convert_hole_to_undefined).GetCode();
@@ -369,7 +367,6 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
if (IsSloppyArgumentsElements(elements_kind)) {
cached_stub = KeyedStoreSloppyArgumentsStub(isolate()).GetCode();
} else if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements() ||
receiver_map->has_fixed_typed_array_elements()) {
cached_stub = StoreFastElementStub(isolate(), is_js_array,
elements_kind, store_mode).GetCode();
@@ -401,7 +398,6 @@ Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
if (receiver_map->has_sloppy_arguments_elements()) {
stub = KeyedStoreSloppyArgumentsStub(isolate()).GetCode();
} else if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements() ||
receiver_map->has_fixed_typed_array_elements()) {
stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
store_mode).GetCode();
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index b6ee6b13e5..bce3c1206d 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -8,7 +8,7 @@
#include "src/ic/ic.h"
#include "src/compiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/macro-assembler.h"
#include "src/prototype.h"
@@ -18,56 +18,15 @@ namespace internal {
Address IC::address() const {
// Get the address of the call.
- Address result = Assembler::target_address_from_return_address(pc());
-
- Debug* debug = isolate()->debug();
- // First check if any break points are active if not just return the address
- // of the call.
- if (!debug->has_break_points()) return result;
-
- // At least one break point is active perform additional test to ensure that
- // break point locations are updated correctly.
- if (debug->IsDebugBreak(
- Assembler::target_address_at(result, raw_constant_pool()))) {
- // If the call site is a call to debug break then return the address in
- // the original code instead of the address in the running code. This will
- // cause the original code to be updated and keeps the breakpoint active in
- // the running code.
- Code* code = GetCode();
- Code* original_code = GetOriginalCode();
- intptr_t delta =
- original_code->instruction_start() - code->instruction_start();
- // Return the address in the original code. This is the place where
- // the call which has been overwritten by the DebugBreakXXX resides
- // and the place where the inline cache system should look.
- return result + delta;
- } else {
- // No break point here just return the address of the call.
- return result;
- }
+ return Assembler::target_address_from_return_address(pc());
}
Address IC::constant_pool() const {
- if (!FLAG_enable_embedded_constant_pool) {
- return NULL;
+ if (FLAG_enable_embedded_constant_pool) {
+ return raw_constant_pool();
} else {
- Address constant_pool = raw_constant_pool();
- Debug* debug = isolate()->debug();
- // First check if any break points are active if not just return the
- // original constant pool.
- if (!debug->has_break_points()) return constant_pool;
-
- // At least one break point is active perform additional test to ensure that
- // break point locations are updated correctly.
- Address target = Assembler::target_address_from_return_address(pc());
- if (debug->IsDebugBreak(
- Assembler::target_address_at(target, constant_pool))) {
- // If the call site is a call to debug break then we want to return the
- // constant pool for the original code instead of the breakpointed code.
- return GetOriginalCode()->constant_pool();
- }
- return constant_pool;
+ return NULL;
}
}
@@ -134,8 +93,8 @@ void IC::set_target(Code* code) {
void LoadIC::set_target(Code* code) {
// The contextual mode must be preserved across IC patching.
- DCHECK(LoadICState::GetContextualMode(code->extra_ic_state()) ==
- LoadICState::GetContextualMode(target()->extra_ic_state()));
+ DCHECK(LoadICState::GetTypeofMode(code->extra_ic_state()) ==
+ LoadICState::GetTypeofMode(target()->extra_ic_state()));
// Strongness must be preserved across IC patching.
DCHECK(LoadICState::GetLanguageMode(code->extra_ic_state()) ==
LoadICState::GetLanguageMode(target()->extra_ic_state()));
@@ -168,18 +127,15 @@ void IC::UpdateTarget() { target_ = handle(raw_target(), isolate_); }
JSFunction* IC::GetRootConstructor(Map* receiver_map, Context* native_context) {
- Isolate* isolate = receiver_map->GetIsolate();
- if (receiver_map == isolate->heap()->boolean_map()) {
- return native_context->boolean_function();
- } else if (receiver_map->instance_type() == HEAP_NUMBER_TYPE) {
- return native_context->number_function();
- } else if (receiver_map->instance_type() < FIRST_NONSTRING_TYPE) {
- return native_context->string_function();
- } else if (receiver_map->instance_type() == SYMBOL_TYPE) {
- return native_context->symbol_function();
- } else {
- return NULL;
+ DisallowHeapAllocation no_alloc;
+ if (receiver_map->IsPrimitiveMap()) {
+ int constructor_function_index =
+ receiver_map->GetConstructorFunctionIndex();
+ if (constructor_function_index != Map::kNoConstructorFunctionIndex) {
+ return JSFunction::cast(native_context->get(constructor_function_index));
+ }
}
+ return nullptr;
}
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index 8ab08bc08d..fc33c80487 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/ic/ic-state.h"
#include "src/ic/ic.h"
-#include "src/ic/ic-state.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index e1fed19133..0b4b9cdc99 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -202,10 +202,10 @@ class CompareICState {
class LoadICState final BASE_EMBEDDED {
private:
- class ContextualModeBits : public BitField<ContextualMode, 0, 1> {};
+ class TypeofModeBits : public BitField<TypeofMode, 0, 1> {};
class LanguageModeBits
- : public BitField<LanguageMode, ContextualModeBits::kNext, 2> {};
- STATIC_ASSERT(static_cast<int>(NOT_CONTEXTUAL) == 0);
+ : public BitField<LanguageMode, TypeofModeBits::kNext, 2> {};
+ STATIC_ASSERT(static_cast<int>(INSIDE_TYPEOF) == 0);
const ExtraICState state_;
public:
@@ -216,22 +216,20 @@ class LoadICState final BASE_EMBEDDED {
explicit LoadICState(ExtraICState extra_ic_state) : state_(extra_ic_state) {}
- explicit LoadICState(ContextualMode mode, LanguageMode language_mode)
- : state_(ContextualModeBits::encode(mode) |
+ explicit LoadICState(TypeofMode typeof_mode, LanguageMode language_mode)
+ : state_(TypeofModeBits::encode(typeof_mode) |
LanguageModeBits::encode(language_mode)) {}
ExtraICState GetExtraICState() const { return state_; }
- ContextualMode contextual_mode() const {
- return ContextualModeBits::decode(state_);
- }
+ TypeofMode typeof_mode() const { return TypeofModeBits::decode(state_); }
LanguageMode language_mode() const {
return LanguageModeBits::decode(state_);
}
- static ContextualMode GetContextualMode(ExtraICState state) {
- return LoadICState(state).contextual_mode();
+ static TypeofMode GetTypeofMode(ExtraICState state) {
+ return LoadICState(state).typeof_mode();
}
static LanguageMode GetLanguageMode(ExtraICState state) {
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 9f75af8eb3..4ed85d5842 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/ic/ic.h"
#include "src/accessors.h"
#include "src/api.h"
@@ -11,6 +11,7 @@
#include "src/codegen.h"
#include "src/conversions.h"
#include "src/execution.h"
+#include "src/frames-inl.h"
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic-inl.h"
@@ -140,8 +141,7 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
#define TRACE_IC(type, name) TraceIC(type, name)
-IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus,
- bool for_queries_only)
+IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
: isolate_(isolate),
target_set_(false),
vector_set_(false),
@@ -184,8 +184,7 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus,
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
target_ = handle(raw_target(), isolate);
kind_ = target_->kind();
- state_ = (!for_queries_only && UseVector()) ? nexus->StateFromFeedback()
- : target_->ic_state();
+ state_ = UseVector() ? nexus->StateFromFeedback() : target_->ic_state();
old_state_ = state_;
extra_ic_state_ = target_->extra_ic_state();
}
@@ -213,16 +212,6 @@ Code* IC::GetCode() const {
}
-Code* IC::GetOriginalCode() const {
- HandleScope scope(isolate());
- Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
- DCHECK(Debug::HasDebugInfo(shared));
- Code* original_code = Debug::GetDebugInfo(shared)->original_code();
- DCHECK(original_code->IsCode());
- return original_code;
-}
-
-
bool IC::AddressIsOptimizedCode() const {
Code* host =
isolate()->inner_pointer_to_code_cache()->GetCacheEntry(address())->code;
@@ -697,8 +686,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- Runtime::GetElementOrCharAt(isolate(), object, index, language_mode()),
- Object);
+ Object::GetElement(isolate(), object, index, language_mode()), Object);
return result;
}
@@ -735,7 +723,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
LookupIterator it(object, name);
LookupForRead(&it);
- if (it.IsFound() || !IsUndeclaredGlobal(object)) {
+ if (it.IsFound() || !ShouldThrowReferenceError(object)) {
// Update inline cache and stub cache.
if (use_ic) UpdateCaches(&it);
@@ -746,7 +734,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
isolate(), result, Object::GetProperty(&it, language_mode()), Object);
if (it.IsFound()) {
return result;
- } else if (!IsUndeclaredGlobal(object)) {
+ } else if (!ShouldThrowReferenceError(object)) {
LOG(isolate(), SuspectReadEvent(*name, *object));
return result;
}
@@ -1210,6 +1198,8 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
isolate());
if (!getter->IsJSFunction()) break;
if (!holder->HasFastProperties()) break;
+ // When debugging we need to go the slow path to flood the accessor.
+ if (GetSharedFunctionInfo()->HasDebugInfo()) break;
Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
if (!receiver->IsJSObject() && !function->IsBuiltin() &&
is_sloppy(function->shared()->language_mode())) {
@@ -1401,9 +1391,7 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
} else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
if (object->IsJSObject() || (object->IsString() && key->IsNumber())) {
Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
- if (object->IsString() || !Object::ToSmi(isolate(), key).is_null()) {
- stub = LoadElementStub(receiver);
- }
+ if (object->IsString() || key->IsSmi()) stub = LoadElementStub(receiver);
}
}
@@ -1493,6 +1481,27 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode) {
+ // Check if the name is trivially convertible to an index and set the element.
+ uint32_t index;
+ if (kind() == Code::KEYED_STORE_IC && name->AsArrayIndex(&index)) {
+ // Rewrite to the generic keyed store stub.
+ if (FLAG_use_ic) {
+ if (UseVector()) {
+ ConfigureVectorState(MEGAMORPHIC);
+ } else if (!AddressIsDeoptimizedCode()) {
+ set_target(*megamorphic_stub());
+ }
+ TRACE_IC("StoreIC", name);
+ TRACE_GENERIC_IC(isolate(), "StoreIC", "name as array index");
+ }
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::SetElement(isolate(), object, index, value, language_mode()),
+ Object);
+ return result;
+ }
+
if (object->IsGlobalObject() && name->IsString()) {
// Look up in script context table.
Handle<String> str_name = Handle<String>::cast(name);
@@ -1544,21 +1553,6 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
return TypeError(MessageTemplate::kNonObjectPropertyStore, object, name);
}
- // Check if the given name is an array index.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- // Ignore other stores where the receiver is not a JSObject.
- // TODO(1475): Must check prototype chains of object wrappers.
- if (!object->IsJSObject()) return value;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- JSObject::SetElement(receiver, index, value, language_mode()), Object);
- return value;
- }
-
// Observed objects are always modified through the runtime.
if (object->IsHeapObject() &&
Handle<HeapObject>::cast(object)->map()->is_observed()) {
@@ -1786,6 +1780,8 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function");
break;
}
+ // When debugging we need to go the slow path to flood the accessor.
+ if (GetSharedFunctionInfo()->HasDebugInfo()) break;
Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
CallOptimization call_optimization(function);
NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
@@ -1956,8 +1952,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
if (store_mode != STANDARD_STORE) {
int external_arrays = 0;
for (int i = 0; i < target_receiver_maps.length(); ++i) {
- if (target_receiver_maps[i]->has_external_array_elements() ||
- target_receiver_maps[i]->has_fixed_typed_array_elements()) {
+ if (target_receiver_maps[i]->has_fixed_typed_array_elements()) {
external_arrays++;
}
}
@@ -1994,7 +1989,7 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
return Map::TransitionElementsTo(map, FAST_HOLEY_DOUBLE_ELEMENTS);
case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
- DCHECK(map->has_external_array_elements());
+ DCHECK(map->has_fixed_typed_array_elements());
// Fall through
case STORE_NO_TRANSITION_HANDLE_COW:
case STANDARD_STORE:
@@ -2006,25 +2001,24 @@ Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
}
-bool IsOutOfBoundsAccess(Handle<JSObject> receiver, int index) {
+bool IsOutOfBoundsAccess(Handle<JSObject> receiver, uint32_t index) {
+ uint32_t length = 0;
if (receiver->IsJSArray()) {
- return JSArray::cast(*receiver)->length()->IsSmi() &&
- index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
+ JSArray::cast(*receiver)->length()->ToArrayLength(&length);
+ } else {
+ length = static_cast<uint32_t>(receiver->elements()->length());
}
- return index >= receiver->elements()->length();
+ return index >= length;
}
-KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
- Handle<Object> key,
- Handle<Object> value) {
- Handle<Smi> smi_key = Object::ToSmi(isolate(), key).ToHandleChecked();
- int index = smi_key->value();
+static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
+ uint32_t index, Handle<Object> value) {
bool oob_access = IsOutOfBoundsAccess(receiver, index);
// Don't consider this a growing store if the store would send the receiver to
// dictionary mode.
bool allow_growth = receiver->IsJSArray() && oob_access &&
- !receiver->WouldConvertToSlowElements(key);
+ !receiver->WouldConvertToSlowElements(index);
if (allow_growth) {
// Handle growing array in stub if necessary.
if (receiver->HasFastSmiElements()) {
@@ -2078,7 +2072,7 @@ KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
}
}
if (!FLAG_trace_external_array_abuse &&
- receiver->map()->has_external_array_elements() && oob_access) {
+ receiver->map()->has_fixed_typed_array_elements() && oob_access) {
return STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS;
}
Heap* heap = receiver->GetHeap();
@@ -2112,7 +2106,10 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> store_handle;
Handle<Code> stub = megamorphic_stub();
- if (key->IsInternalizedString() || key->IsSymbol()) {
+ uint32_t index;
+ if ((key->IsInternalizedString() &&
+ !String::cast(*key)->AsArrayIndex(&index)) ||
+ key->IsSymbol()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), store_handle,
StoreIC::Store(object, Handle<Name>::cast(key), value,
@@ -2152,22 +2149,21 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
if (use_ic) {
- DCHECK(!object->IsAccessCheckNeeded());
-
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- bool key_is_smi_like = !Object::ToSmi(isolate(), key).is_null();
if (receiver->elements()->map() ==
isolate()->heap()->sloppy_arguments_elements_map() &&
!is_sloppy(language_mode())) {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver");
- } else if (key_is_smi_like) {
+ } else if (key->IsSmi() && Smi::cast(*key)->value() >= 0) {
+ uint32_t index = static_cast<uint32_t>(Smi::cast(*key)->value());
// We should go generic if receiver isn't a dictionary, but our
// prototype chain does have dictionary elements. This ensures that
// other non-dictionary receivers in the polymorphic case benefit
// from fast path keyed stores.
- if (!(receiver->map()->DictionaryElementsInPrototypeChainOnly())) {
- KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value);
+ if (!receiver->map()->DictionaryElementsInPrototypeChainOnly()) {
+ KeyedAccessStoreMode store_mode =
+ GetStoreMode(receiver, index, value);
stub = StoreElementStub(receiver, store_mode);
} else {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "dictionary prototype");
@@ -2331,7 +2327,7 @@ void CallIC::HandleMiss(Handle<Object> function) {
//
// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(CallIC_Miss) {
+RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK(args.length() == 3);
@@ -2346,7 +2342,7 @@ RUNTIME_FUNCTION(CallIC_Miss) {
}
-RUNTIME_FUNCTION(CallIC_Customization_Miss) {
+RUNTIME_FUNCTION(Runtime_CallIC_Customization_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK(args.length() == 3);
@@ -2363,7 +2359,7 @@ RUNTIME_FUNCTION(CallIC_Customization_Miss) {
// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(LoadIC_Miss) {
+RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
@@ -2394,7 +2390,7 @@ RUNTIME_FUNCTION(LoadIC_Miss) {
// Used from ic-<arch>.cc
-RUNTIME_FUNCTION(KeyedLoadIC_Miss) {
+RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
@@ -2413,7 +2409,7 @@ RUNTIME_FUNCTION(KeyedLoadIC_Miss) {
}
-RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure) {
+RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
@@ -2434,7 +2430,7 @@ RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure) {
// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(StoreIC_Miss) {
+RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
@@ -2472,7 +2468,7 @@ RUNTIME_FUNCTION(StoreIC_Miss) {
}
-RUNTIME_FUNCTION(StoreIC_MissFromStubFailure) {
+RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
@@ -2481,7 +2477,7 @@ RUNTIME_FUNCTION(StoreIC_MissFromStubFailure) {
Handle<Object> result;
if (FLAG_vector_stores) {
- DCHECK(args.length() == 5);
+ DCHECK(args.length() == 5 || args.length() == 6);
Handle<Smi> slot = args.at<Smi>(3);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
@@ -2511,7 +2507,7 @@ RUNTIME_FUNCTION(StoreIC_MissFromStubFailure) {
// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(KeyedStoreIC_Miss) {
+RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
@@ -2540,7 +2536,7 @@ RUNTIME_FUNCTION(KeyedStoreIC_Miss) {
}
-RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure) {
+RUNTIME_FUNCTION(Runtime_KeyedStoreIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
@@ -2569,7 +2565,7 @@ RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure) {
}
-RUNTIME_FUNCTION(StoreIC_Slow) {
+RUNTIME_FUNCTION(Runtime_StoreIC_Slow) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
@@ -2585,7 +2581,7 @@ RUNTIME_FUNCTION(StoreIC_Slow) {
}
-RUNTIME_FUNCTION(KeyedStoreIC_Slow) {
+RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
@@ -2601,15 +2597,16 @@ RUNTIME_FUNCTION(KeyedStoreIC_Slow) {
}
-RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss) {
+RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK(args.length() == 4);
KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- Handle<Object> value = args.at<Object>(0);
- Handle<Map> map = args.at<Map>(1);
- Handle<Object> key = args.at<Object>(2);
- Handle<Object> object = args.at<Object>(3);
+ Handle<Object> object = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ Handle<Object> value = args.at<Object>(2);
+ Handle<Map> map = args.at<Map>(3);
+
LanguageMode language_mode = ic.language_mode();
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
@@ -2698,7 +2695,7 @@ MaybeHandle<Object> BinaryOpIC::Transition(
}
-RUNTIME_FUNCTION(BinaryOpIC_Miss) {
+RUNTIME_FUNCTION(Runtime_BinaryOpIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -2713,7 +2710,7 @@ RUNTIME_FUNCTION(BinaryOpIC_Miss) {
}
-RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite) {
+RUNTIME_FUNCTION(Runtime_BinaryOpIC_MissWithAllocationSite) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
@@ -2792,7 +2789,7 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Used from CompareICStub::GenerateMiss in code-stubs-<arch>.cc.
-RUNTIME_FUNCTION(CompareIC_Miss) {
+RUNTIME_FUNCTION(Runtime_CompareIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK(args.length() == 3);
@@ -2853,7 +2850,7 @@ Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) {
}
-RUNTIME_FUNCTION(CompareNilIC_Miss) {
+RUNTIME_FUNCTION(Runtime_CompareNilIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
Handle<Object> object = args.at<Object>(0);
@@ -2862,7 +2859,7 @@ RUNTIME_FUNCTION(CompareNilIC_Miss) {
}
-RUNTIME_FUNCTION(Unreachable) {
+RUNTIME_FUNCTION(Runtime_Unreachable) {
UNREACHABLE();
CHECK(false);
return isolate->heap()->undefined_value();
@@ -2914,7 +2911,7 @@ Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
}
-RUNTIME_FUNCTION(ToBooleanIC_Miss) {
+RUNTIME_FUNCTION(Runtime_ToBooleanIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
DCHECK(args.length() == 1);
HandleScope scope(isolate);
@@ -2924,7 +2921,7 @@ RUNTIME_FUNCTION(ToBooleanIC_Miss) {
}
-RUNTIME_FUNCTION(StoreCallbackProperty) {
+RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
Handle<JSObject> receiver = args.at<JSObject>(0);
Handle<JSObject> holder = args.at<JSObject>(1);
Handle<HeapObject> callback_or_cell = args.at<HeapObject>(2);
@@ -2961,7 +2958,7 @@ RUNTIME_FUNCTION(StoreCallbackProperty) {
* Returns |Heap::no_interceptor_result_sentinel()| if interceptor doesn't
* provide any value for the given name.
*/
-RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
+RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptorOnly) {
DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
Handle<Name> name =
args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
@@ -2980,27 +2977,11 @@ RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
}
-static Object* ThrowReferenceError(Isolate* isolate, Name* name) {
- // If the load is non-contextual, just return the undefined result.
- // Note that both keyed and non-keyed loads may end up here.
- HandleScope scope(isolate);
- LoadIC ic(IC::NO_EXTRA_FRAME, isolate, true);
- if (ic.contextual_mode() != CONTEXTUAL) {
- return isolate->heap()->undefined_value();
- }
-
- // Throw a reference error.
- Handle<Name> name_handle(name);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewReferenceError(MessageTemplate::kNotDefined, name_handle));
-}
-
-
/**
* Loads a property with an interceptor performing post interceptor
* lookup if interceptor failed.
*/
-RUNTIME_FUNCTION(LoadPropertyWithInterceptor) {
+RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
HandleScope scope(isolate);
DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
Handle<Name> name =
@@ -3018,14 +2999,25 @@ RUNTIME_FUNCTION(LoadPropertyWithInterceptor) {
if (it.IsFound()) return *result;
- return ThrowReferenceError(isolate, Name::cast(args[0]));
+ // Return the undefined result if the reference error should not be thrown.
+ // Note that both keyed and non-keyed loads may end up here.
+ LoadICNexus nexus(isolate);
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ if (!ic.ShouldThrowReferenceError(it.GetReceiver())) {
+ return isolate->heap()->undefined_value();
+ }
+
+ // Throw a reference error.
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewReferenceError(MessageTemplate::kNotDefined, it.name()));
}
-RUNTIME_FUNCTION(StorePropertyWithInterceptor) {
+RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ StoreICNexus nexus(isolate);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
Handle<JSObject> receiver = args.at<JSObject>(0);
Handle<Name> name = args.at<Name>(1);
Handle<Object> value = args.at<Object>(2);
@@ -3051,7 +3043,7 @@ RUNTIME_FUNCTION(StorePropertyWithInterceptor) {
}
-RUNTIME_FUNCTION(LoadElementWithInterceptor) {
+RUNTIME_FUNCTION(Runtime_LoadElementWithInterceptor) {
// TODO(verwaest): This should probably get the holder and receiver as input.
HandleScope scope(isolate);
Handle<JSObject> receiver = args.at<JSObject>(0);
@@ -3067,7 +3059,7 @@ RUNTIME_FUNCTION(LoadElementWithInterceptor) {
}
-RUNTIME_FUNCTION(LoadIC_MissFromStubFailure) {
+RUNTIME_FUNCTION(Runtime_LoadIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
@@ -3096,15 +3088,5 @@ RUNTIME_FUNCTION(LoadIC_MissFromStubFailure) {
return *result;
}
-
-
-static const Address IC_utilities[] = {
-#define ADDR(name) FUNCTION_ADDR(name),
- IC_UTIL_LIST(ADDR) NULL
-#undef ADDR
-};
-
-
-Address IC::AddressFromUtilityId(IC::UtilityId id) { return IC_utilities[id]; }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index dec8318ae5..ee5fd261dc 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -12,45 +12,11 @@
namespace v8 {
namespace internal {
-
-// IC_UTIL_LIST defines all utility functions called from generated
-// inline caching code. The argument for the macro, ICU, is the function name.
-#define IC_UTIL_LIST(ICU) \
- ICU(LoadIC_Miss) \
- ICU(KeyedLoadIC_Miss) \
- ICU(CallIC_Miss) \
- ICU(CallIC_Customization_Miss) \
- ICU(StoreIC_Miss) \
- ICU(StoreIC_Slow) \
- ICU(KeyedStoreIC_Miss) \
- ICU(KeyedStoreIC_Slow) \
- /* Utilities for IC stubs. */ \
- ICU(StoreCallbackProperty) \
- ICU(LoadPropertyWithInterceptorOnly) \
- ICU(LoadPropertyWithInterceptor) \
- ICU(LoadElementWithInterceptor) \
- ICU(StorePropertyWithInterceptor) \
- ICU(CompareIC_Miss) \
- ICU(BinaryOpIC_Miss) \
- ICU(CompareNilIC_Miss) \
- ICU(Unreachable) \
- ICU(ToBooleanIC_Miss)
//
// IC is the base class for LoadIC, StoreIC, KeyedLoadIC, and KeyedStoreIC.
//
class IC {
public:
- // The ids for utility called from the generated code.
- enum UtilityId {
-#define CONST_NAME(name) k##name,
- IC_UTIL_LIST(CONST_NAME)
-#undef CONST_NAME
- kUtilityCount
- };
-
- // Looks up the address of the named utility.
- static Address AddressFromUtilityId(UtilityId id);
-
// Alias the inline cache state type to make the IC code more readable.
typedef InlineCacheState State;
@@ -60,8 +26,7 @@ class IC {
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
- IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL,
- bool for_queries_only = false);
+ IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL);
virtual ~IC() {}
State state() const { return state_; }
@@ -131,8 +96,6 @@ class IC {
SharedFunctionInfo* GetSharedFunctionInfo() const;
// Get the code object of the caller.
Code* GetCode() const;
- // Get the original (non-breakpointed) code object of the caller.
- Code* GetOriginalCode() const;
bool AddressIsOptimizedCode() const;
inline bool AddressIsDeoptimizedCode() const;
@@ -310,23 +273,6 @@ class IC {
};
-// An IC_Utility encapsulates IC::UtilityId. It exists mainly because you
-// cannot make forward declarations to an enum.
-class IC_Utility {
- public:
- explicit IC_Utility(IC::UtilityId id)
- : address_(IC::AddressFromUtilityId(id)), id_(id) {}
-
- Address address() const { return address_; }
-
- IC::UtilityId id() const { return id_; }
-
- private:
- Address address_;
- IC::UtilityId id_;
-};
-
-
class CallIC : public IC {
public:
CallIC(Isolate* isolate, CallICNexus* nexus)
@@ -354,13 +300,13 @@ class CallIC : public IC {
class LoadIC : public IC {
public:
- static ExtraICState ComputeExtraICState(ContextualMode contextual_mode,
+ static ExtraICState ComputeExtraICState(TypeofMode typeof_mode,
LanguageMode language_mode) {
- return LoadICState(contextual_mode, language_mode).GetExtraICState();
+ return LoadICState(typeof_mode, language_mode).GetExtraICState();
}
- ContextualMode contextual_mode() const {
- return LoadICState::GetContextualMode(extra_ic_state());
+ TypeofMode typeof_mode() const {
+ return LoadICState::GetTypeofMode(extra_ic_state());
}
LanguageMode language_mode() const {
@@ -373,24 +319,8 @@ class LoadIC : public IC {
DCHECK(IsLoadStub());
}
- // TODO(mvstanton): The for_queries_only is because we have a case where we
- // construct an IC only to gather the contextual mode, and we don't have
- // vector/slot information. for_queries_only is a temporary hack to enable the
- // strong DCHECK protection around vector/slot.
- LoadIC(FrameDepth depth, Isolate* isolate, bool for_queries_only)
- : IC(depth, isolate, NULL, for_queries_only) {
- DCHECK(IsLoadStub());
- }
-
- // Returns if this IC is for contextual (no explicit receiver)
- // access to properties.
- bool IsUndeclaredGlobal(Handle<Object> receiver) {
- if (receiver->IsGlobalObject()) {
- return contextual_mode() == CONTEXTUAL;
- } else {
- DCHECK(contextual_mode() != CONTEXTUAL);
- return false;
- }
+ bool ShouldThrowReferenceError(Handle<Object> receiver) {
+ return receiver->IsGlobalObject() && typeof_mode() == NOT_INSIDE_TYPEOF;
}
// Code generator routines.
@@ -452,10 +382,10 @@ class KeyedLoadIC : public LoadIC {
class IcCheckTypeField
: public BitField<IcCheckType, LoadICState::kNextBitFieldOffset, 1> {};
- static ExtraICState ComputeExtraICState(ContextualMode contextual_mode,
+ static ExtraICState ComputeExtraICState(TypeofMode typeof_mode,
LanguageMode language_mode,
IcCheckType key_type) {
- return LoadICState(contextual_mode, language_mode).GetExtraICState() |
+ return LoadICState(typeof_mode, language_mode).GetExtraICState() |
IcCheckTypeField::encode(key_type);
}
@@ -666,9 +596,6 @@ class KeyedStoreIC : public StoreIC {
static void Clear(Isolate* isolate, Address address, Code* target,
Address constant_pool);
- KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
- Handle<Object> key, Handle<Object> value);
-
Handle<Map> ComputeTransitionedMap(Handle<Map> map,
KeyedAccessStoreMode store_mode);
@@ -750,25 +677,6 @@ class ToBooleanIC : public IC {
enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
-DECLARE_RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure);
-DECLARE_RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure);
-DECLARE_RUNTIME_FUNCTION(UnaryOpIC_Miss);
-DECLARE_RUNTIME_FUNCTION(StoreIC_MissFromStubFailure);
-DECLARE_RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss);
-DECLARE_RUNTIME_FUNCTION(BinaryOpIC_Miss);
-DECLARE_RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite);
-DECLARE_RUNTIME_FUNCTION(CompareNilIC_Miss);
-DECLARE_RUNTIME_FUNCTION(ToBooleanIC_Miss);
-DECLARE_RUNTIME_FUNCTION(LoadIC_MissFromStubFailure);
-
-// Support functions for callbacks handlers.
-DECLARE_RUNTIME_FUNCTION(StoreCallbackProperty);
-
-// Support functions for interceptor handlers.
-DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly);
-DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptor);
-DECLARE_RUNTIME_FUNCTION(LoadElementWithInterceptor);
-DECLARE_RUNTIME_FUNCTION(StorePropertyWithInterceptor);
}
} // namespace v8::internal
diff --git a/deps/v8/src/ic/mips/access-compiler-mips.cc b/deps/v8/src/ic/mips/access-compiler-mips.cc
index eb519d22e4..9aba385497 100644
--- a/deps/v8/src/ic/mips/access-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/access-compiler-mips.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/ic/access-compiler.h"
@@ -33,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(a3.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ DCHECK(a3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, a3, t0, t1};
return registers;
}
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index 13ce921b8b..e3d4ae3adc 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/ic/call-optimization.h"
@@ -216,10 +214,9 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, IC::UtilityId id) {
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
}
@@ -305,9 +302,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
}
@@ -318,9 +313,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
}
@@ -342,11 +335,17 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
+void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
+ Register scratch) {
+ DCHECK(false); // Not implemented.
+}
+
+
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register map_reg,
Register scratch,
Label* miss) {
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- Register map_reg = StoreTransitionDescriptor::MapRegister();
DCHECK(!map_reg.is(scratch));
__ LoadWeakValue(map_reg, cell, miss);
if (transition->CanBeDeprecated()) {
@@ -656,7 +655,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
// of this method).
CompileCallLoadPropertyWithInterceptor(
masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
+ Runtime::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
@@ -687,10 +686,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
+ NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -713,9 +710,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(at, value());
// Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -727,9 +722,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/mips/ic-compiler-mips.cc b/deps/v8/src/ic/mips/ic-compiler-mips.cc
index 7b88f32331..80f5c4783f 100644
--- a/deps/v8/src/ic/mips/ic-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/ic-compiler-mips.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/ic/ic.h"
@@ -48,7 +46,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
// Polymorphic keyed stores may use the map register
Register map_reg = scratch1();
DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ map_reg.is(StoreTransitionDescriptor::MapRegister()));
int receiver_count = maps->length();
int number_of_handled_maps = 0;
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index 5a6f95a231..a673dbf254 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/codegen.h"
@@ -320,9 +317,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
}
@@ -351,11 +347,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
}
@@ -440,16 +433,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, t0, t1, t2, t5));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_load_dummy_vector());
- int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::LOAD_IC, flags, false, receiver, key, t0, t1, t2, t5);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, flags,
+ receiver, key, t0, t1, t2, t5);
// Cache miss.
GenerateMiss(masm);
@@ -656,8 +650,11 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// Check if the object is a JS array or not.
__ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
__ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
- // Check that the object is some kind of JSObject.
- __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
+ // Check that the object is some kind of JS object EXCEPT JS Value type. In
+ // the case that the object is a value-wrapper object, we enter the runtime
+ // system to make sure that indexing into string objects works as intended.
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ __ Branch(&slow, lo, t0, Operand(JS_OBJECT_TYPE));
// Object case: Check key against length in the elements array.
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -685,17 +682,18 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, a3, t0, t1, t2));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_store_dummy_vector());
- int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedStoreDummyVectorRootIndex);
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
}
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, receiver, key, a3, t0, t1, t2);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, key, a3, t0, t1, t2);
// Cache miss.
__ Branch(&miss);
@@ -743,14 +741,24 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
}
@@ -764,8 +772,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, receiver, name, a3, t0, t1, t2);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, name, a3, t0, t1, t2);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -773,12 +781,11 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMiss(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
+
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
}
diff --git a/deps/v8/src/ic/mips/stub-cache-mips.cc b/deps/v8/src/ic/mips/stub-cache-mips.cc
index 2048531aba..12cacc8f4f 100644
--- a/deps/v8/src/ic/mips/stub-cache-mips.cc
+++ b/deps/v8/src/ic/mips/stub-cache-mips.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/codegen.h"
@@ -18,7 +16,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
@@ -81,8 +79,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif
- if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
-
// Jump to the first instruction in the code stub.
__ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
@@ -93,10 +89,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, bool leave_frame,
- Register receiver, Register name,
- Register scratch, Register extra, Register extra2,
- Register extra3) {
+ Code::Flags flags, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
@@ -146,8 +141,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ And(scratch, scratch, Operand(mask));
// Probe the primary table.
- ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
- name, scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
+ extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
__ srl(at, name, kCacheIndexShift);
@@ -157,8 +152,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ And(scratch, scratch, Operand(mask2));
// Probe the secondary table.
- ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
- name, scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
+ extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/ic/mips64/access-compiler-mips64.cc b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
index 74b4b93240..a2e7aed4dc 100644
--- a/deps/v8/src/ic/mips64/access-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/ic/access-compiler.h"
@@ -33,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(a3.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ DCHECK(a3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, a3, a4, a5};
return registers;
}
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index d83c807e3c..49e9265aee 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/ic/call-optimization.h"
@@ -217,10 +215,9 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, IC::UtilityId id) {
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
}
@@ -306,9 +303,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
}
@@ -319,9 +314,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
}
@@ -343,11 +336,17 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
+void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
+ Register scratch) {
+ DCHECK(false); // Not implemented.
+}
+
+
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register map_reg,
Register scratch,
Label* miss) {
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- Register map_reg = StoreTransitionDescriptor::MapRegister();
DCHECK(!map_reg.is(scratch));
__ LoadWeakValue(map_reg, cell, miss);
if (transition->CanBeDeprecated()) {
@@ -657,7 +656,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
// of this method).
CompileCallLoadPropertyWithInterceptor(
masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
+ Runtime::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
@@ -688,10 +687,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
+ NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -714,9 +711,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(at, value());
// Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -728,9 +723,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
index df67fb9a81..a834430e1e 100644
--- a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/ic/ic.h"
@@ -48,7 +46,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
// Polymorphic keyed stores may use the map register
Register map_reg = scratch1();
DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ map_reg.is(StoreTransitionDescriptor::MapRegister()));
int receiver_count = maps->length();
int number_of_handled_maps = 0;
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index a6075dfcaa..6f3916dd2e 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/codegen.h"
@@ -317,9 +314,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
}
@@ -348,11 +344,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
}
@@ -437,16 +430,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, a4, a5, a6, t1));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_load_dummy_vector());
- int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::LOAD_IC, flags, false, receiver, key, a4, a5, a6, t1);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, flags,
+ receiver, key, a4, a5, a6, t1);
// Cache miss.
GenerateMiss(masm);
@@ -686,17 +680,18 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, a3, a4, a5, a6));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_store_dummy_vector());
- int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedStoreDummyVectorRootIndex);
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
}
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, receiver, key, a3, a4, a5, a6);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, key, a3, a4, a5, a6);
// Cache miss.
__ Branch(&miss);
@@ -744,14 +739,24 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
}
@@ -765,8 +770,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, receiver, name, a3, a4, a5, a6);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, name, a3, a4, a5, a6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -774,12 +779,11 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMiss(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
+
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
}
diff --git a/deps/v8/src/ic/mips64/stub-cache-mips64.cc b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
index 0d612903aa..b1ec640719 100644
--- a/deps/v8/src/ic/mips64/stub-cache-mips64.cc
+++ b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/codegen.h"
@@ -18,7 +16,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
@@ -83,8 +81,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif
- if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
-
// Jump to the first instruction in the code stub.
__ Daddu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
@@ -95,10 +91,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, bool leave_frame,
- Register receiver, Register name,
- Register scratch, Register extra, Register extra2,
- Register extra3) {
+ Code::Flags flags, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
@@ -149,8 +144,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ And(scratch, scratch, Operand(mask));
// Probe the primary table.
- ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
- name, scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
+ extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
__ dsrl(at, name, kCacheIndexShift);
@@ -160,8 +155,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ And(scratch, scratch, Operand(mask2));
// Probe the secondary table.
- ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
- name, scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
+ extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/ic/ppc/OWNERS b/deps/v8/src/ic/ppc/OWNERS
index a04d29a94f..eb007cb908 100644
--- a/deps/v8/src/ic/ppc/OWNERS
+++ b/deps/v8/src/ic/ppc/OWNERS
@@ -1,3 +1,4 @@
+jyan@ca.ibm.com
dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
diff --git a/deps/v8/src/ic/ppc/access-compiler-ppc.cc b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
index aa3859a62c..2021b80fd8 100644
--- a/deps/v8/src/ic/ppc/access-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/ic/access-compiler.h"
@@ -33,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(r6.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ DCHECK(r6.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, r6, r7, r8};
return registers;
}
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 8988b08e2c..9ec2f5ff3f 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/ic/call-optimization.h"
@@ -221,10 +219,9 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, IC::UtilityId id) {
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
}
@@ -314,9 +311,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
}
@@ -327,9 +322,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
}
@@ -351,11 +344,17 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
+void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
+ Register scratch) {
+ DCHECK(false); // Not implemented.
+}
+
+
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register map_reg,
Register scratch,
Label* miss) {
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- Register map_reg = StoreTransitionDescriptor::MapRegister();
DCHECK(!map_reg.is(scratch));
__ LoadWeakValue(map_reg, cell, miss);
if (transition->CanBeDeprecated()) {
@@ -661,7 +660,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
// of this method.)
CompileCallLoadPropertyWithInterceptor(
masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
+ Runtime::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
@@ -693,10 +692,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
+ NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -720,9 +717,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(ip, value());
// Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -734,9 +729,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
index ad72c231de..59054b2058 100644
--- a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/ic/ic.h"
@@ -63,7 +61,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
// Polymorphic keyed stores may use the map register
Register map_reg = scratch1();
DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ map_reg.is(StoreTransitionDescriptor::MapRegister()));
int receiver_count = maps->length();
int number_of_handled_maps = 0;
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index db56fbaf66..7cac3058bb 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/codegen.h"
@@ -323,9 +321,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
}
@@ -354,10 +351,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
}
@@ -444,16 +439,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, r7, r8, r9, r10));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_load_dummy_vector());
- int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedLoadDummyVectorRootIndex);
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::KEYED_LOAD_IC, flags, false, receiver, key, r7, r8, r9, r10);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
+ receiver, key, r7, r8, r9, r10);
// Cache miss.
GenerateMiss(masm);
@@ -477,14 +473,24 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
}
@@ -706,17 +712,18 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_store_dummy_vector());
- int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadRoot(vector, Heap::kKeyedStoreDummyVectorRootIndex);
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
}
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, receiver, key, r6, r7, r8, r9);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, key, r6, r7, r8, r9);
// Cache miss.
__ b(&miss);
@@ -777,8 +784,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, receiver, name, r6, r7, r8, r9);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, name, r6, r7, r8, r9);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -786,13 +793,11 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMiss(MacroAssembler* masm) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
}
diff --git a/deps/v8/src/ic/ppc/stub-cache-ppc.cc b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
index 57b32452d6..ed703fb21e 100644
--- a/deps/v8/src/ic/ppc/stub-cache-ppc.cc
+++ b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/codegen.h"
@@ -18,7 +16,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
@@ -95,8 +93,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif
- if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
-
// Jump to the first instruction in the code stub.
__ addi(r0, code, Operand(Code::kHeaderSize - kHeapObjectTag));
__ mtctr(r0);
@@ -108,10 +104,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, bool leave_frame,
- Register receiver, Register name,
- Register scratch, Register extra, Register extra2,
- Register extra3) {
+ Code::Flags flags, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
@@ -165,8 +160,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
- ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
- name, scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
+ extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
__ sub(scratch, scratch, name);
@@ -175,8 +170,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
- ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
- name, scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
+ extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index 630e671613..4a5f9bd7ad 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/ic/stub-cache.h"
#include "src/base/bits.h"
-#include "src/ic/stub-cache.h"
#include "src/type-info.h"
namespace v8 {
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 4223b28dda..cb1b62848e 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -53,9 +53,9 @@ class StubCache {
// registers. Set to no_reg if not needed.
// If leave_frame is true, then exit a frame before the tail call.
void GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2 = no_reg, Register extra3 = no_reg);
+ Code::Flags flags, Register receiver, Register name,
+ Register scratch, Register extra, Register extra2 = no_reg,
+ Register extra3 = no_reg);
enum Table { kPrimary, kSecondary };
diff --git a/deps/v8/src/ic/x64/access-compiler-x64.cc b/deps/v8/src/ic/x64/access-compiler-x64.cc
index 6acc950bc0..63e60f0b91 100644
--- a/deps/v8/src/ic/x64/access-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/access-compiler-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/ic/access-compiler.h"
@@ -33,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(rbx.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ DCHECK(rbx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, rbx, rdi, r8};
return registers;
}
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index 43754c32b1..920d06c541 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/ic/call-optimization.h"
@@ -115,10 +113,9 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, IC::UtilityId id) {
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
}
@@ -321,8 +318,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
}
@@ -331,8 +327,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
}
@@ -354,11 +349,17 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
+void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
+ Register scratch) {
+ DCHECK(false); // Not implemented.
+}
+
+
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register map_reg,
Register scratch,
Label* miss) {
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- Register map_reg = StoreTransitionDescriptor::MapRegister();
DCHECK(!map_reg.is(scratch));
__ LoadWeakValue(map_reg, cell, miss);
if (transition->CanBeDeprecated()) {
@@ -671,7 +672,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
// of this method.)
CompileCallLoadPropertyWithInterceptor(
masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
+ Runtime::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
@@ -705,10 +706,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
holder());
__ PushReturnAddressFrom(scratch2());
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
+ NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -733,9 +732,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -751,9 +748,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/x64/ic-compiler-x64.cc b/deps/v8/src/ic/x64/ic-compiler-x64.cc
index 4fcd7b7d3c..d5e548412c 100644
--- a/deps/v8/src/ic/x64/ic-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/ic-compiler-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/ic/ic.h"
@@ -105,7 +103,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
// Polymorphic keyed stores may use the map register
Register map_reg = scratch1();
DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ map_reg.is(StoreTransitionDescriptor::MapRegister()));
__ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = maps->length();
int number_of_handled_maps = 0;
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index 3556d21000..8d334809cb 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/codegen.h"
@@ -345,16 +343,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadDescriptor::SlotRegister();
DCHECK(!AreAliased(megamorphic_scratch, vector, slot));
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_load_dummy_vector());
- int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ Move(vector, dummy_vector);
__ Move(slot, Smi::FromInt(slot_index));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
- false, receiver, key,
+ receiver, key,
megamorphic_scratch, no_reg);
// Cache miss.
GenerateMiss(masm);
@@ -541,8 +540,11 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ CmpInstanceType(r9, JS_ARRAY_TYPE);
__ j(equal, &array);
- // Check that the object is some kind of JSObject.
- __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
+ // Check that the object is some kind of JS object EXCEPT JS Value type. In
+ // the case that the object is a value-wrapper object, we enter the runtime
+ // system to make sure that indexing into string objects works as intended.
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ __ CmpInstanceType(r9, JS_OBJECT_TYPE);
__ j(below, &slow);
// Object case: Check key against length in the elements array.
@@ -569,17 +571,18 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Register slot = VectorStoreICDescriptor::SlotRegister();
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_store_dummy_vector());
- int slot_index = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
__ Move(vector, dummy_vector);
__ Move(slot, Smi::FromInt(slot_index));
}
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, receiver, key, rbx, no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, key, rbx, no_reg);
// Cache miss.
__ jmp(&miss);
@@ -673,10 +676,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
}
@@ -708,10 +709,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
}
@@ -742,7 +741,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
+ masm, Code::STORE_IC, flags, StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), rbx, no_reg);
// Cache miss: Jump to runtime.
@@ -754,14 +753,21 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
+ Register temp = r11;
+ DCHECK(!temp.is(receiver) && !temp.is(name) && !temp.is(value));
- DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
-
- __ PopReturnAddressTo(rbx);
+ __ PopReturnAddressTo(temp);
__ Push(receiver);
__ Push(name);
__ Push(value);
- __ PushReturnAddressFrom(rbx);
+ if (FLAG_vector_stores) {
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ DCHECK(!temp.is(slot) && !temp.is(vector));
+ __ Push(slot);
+ __ Push(vector);
+ }
+ __ PushReturnAddressFrom(temp);
}
@@ -770,9 +776,8 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
}
@@ -801,9 +806,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
}
diff --git a/deps/v8/src/ic/x64/stub-cache-x64.cc b/deps/v8/src/ic/x64/stub-cache-x64.cc
index 34f51626a6..3908018927 100644
--- a/deps/v8/src/ic/x64/stub-cache-x64.cc
+++ b/deps/v8/src/ic/x64/stub-cache-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/codegen.h"
@@ -18,7 +16,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags,
StubCache::Table table, Register receiver, Register name,
// The offset is scaled by 4, based on
// kCacheIndexShift, which is two bits
@@ -74,8 +72,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif
- if (leave_frame) __ leave();
-
// Jump to the first instruction in the code stub.
__ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(kScratchRegister);
@@ -85,10 +81,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, bool leave_frame,
- Register receiver, Register name,
- Register scratch, Register extra, Register extra2,
- Register extra3) {
+ Code::Flags flags, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
USE(extra); // The register extra is not used on the X64 platform.
@@ -137,8 +132,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
- ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
- name, scratch);
+ ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch);
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
@@ -150,8 +144,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
- ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
- name, scratch);
+ ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name,
+ scratch);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/ic/x87/access-compiler-x87.cc b/deps/v8/src/ic/x87/access-compiler-x87.cc
index d5fde5d4b8..bdcbb166b9 100644
--- a/deps/v8/src/ic/x87/access-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/access-compiler-x87.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/ic/access-compiler.h"
@@ -32,7 +30,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(ebx.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ DCHECK(ebx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, ebx, edi, no_reg};
return registers;
}
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index 056bd952c7..c0d5fd8234 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/ic/call-optimization.h"
@@ -294,10 +292,9 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, IC::UtilityId id) {
+ Handle<JSObject> holder_obj, Runtime::FunctionId id) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
}
@@ -321,8 +318,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
}
@@ -331,8 +327,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
}
@@ -354,11 +349,20 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
+void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
+ Register scratch) {
+ // Get the return address, push the argument and then continue.
+ __ pop(scratch);
+ __ push(map_reg);
+ __ push(scratch);
+}
+
+
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+ Register map_reg,
Register scratch,
Label* miss) {
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- Register map_reg = StoreTransitionDescriptor::MapRegister();
DCHECK(!map_reg.is(scratch));
__ LoadWeakValue(map_reg, cell, miss);
if (transition->CanBeDeprecated()) {
@@ -669,7 +673,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
// of this method.)
CompileCallLoadPropertyWithInterceptor(
masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
+ Runtime::kLoadPropertyWithInterceptorOnly);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
@@ -710,10 +714,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
holder());
__ push(scratch2()); // restore old return address
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
+ NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
}
@@ -738,9 +740,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -756,9 +756,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/x87/ic-compiler-x87.cc b/deps/v8/src/ic/x87/ic-compiler-x87.cc
index 3697708037..4d5fc6a712 100644
--- a/deps/v8/src/ic/x87/ic-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/ic-compiler-x87.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/ic/ic.h"
@@ -69,7 +67,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
// Polymorphic keyed stores may use the map register
Register map_reg = scratch1();
DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+ map_reg.is(StoreTransitionDescriptor::MapRegister()));
__ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = maps->length();
int number_of_handled_maps = 0;
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index b863c69132..f9a94bc5b8 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
@@ -157,10 +155,9 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
(1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
__ j(not_zero, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing
- // into string objects works as intended.
+ // Check that the object is some kind of JS object EXCEPT JS Value type. In
+ // the case that the object is a value-wrapper object, we enter the runtime
+ // system to make sure that indexing into string objects works as intended.
DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpInstanceType(map, JS_OBJECT_TYPE);
@@ -341,16 +338,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- isolate->factory()->keyed_load_dummy_vector());
- int slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(isolate);
+ int slot = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
- false, receiver, key, ebx, edi);
+ receiver, key, ebx, edi);
__ pop(LoadWithVectorDescriptor::VectorRegister());
__ pop(LoadDescriptor::SlotRegister());
@@ -539,8 +537,11 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotSmi(key, &maybe_name_key);
__ CmpInstanceType(edi, JS_ARRAY_TYPE);
__ j(equal, &array);
- // Check that the object is some kind of JSObject.
- __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+ // Check that the object is some kind of JS object EXCEPT JS Value type. In
+ // the case that the object is a value-wrapper object, we enter the runtime
+ // system to make sure that indexing into string objects works as intended.
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ __ CmpInstanceType(edi, JS_OBJECT_TYPE);
__ j(below, &slow);
// Object case: Check key against length in the elements array.
@@ -565,17 +566,18 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
if (FLAG_vector_stores) {
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector = Handle<TypeFeedbackVector>::cast(
- masm->isolate()->factory()->keyed_store_dummy_vector());
- int slot = dummy_vector->GetIndex(FeedbackVectorICSlot(0));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot = dummy_vector->GetIndex(
+ FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
}
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, receiver, key, ebx, no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+ receiver, key, ebx, no_reg);
if (FLAG_vector_stores) {
__ pop(VectorStoreICDescriptor::VectorRegister());
@@ -676,10 +678,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
}
@@ -709,10 +709,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
int arg_count = 4;
- __ TailCallExternalReference(ref, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
}
@@ -740,7 +738,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
+ masm, Code::STORE_IC, flags, StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), ebx, no_reg);
// Cache miss: Jump to runtime.
@@ -753,13 +751,24 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
-
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
+ if (FLAG_vector_stores) {
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // Contains the return address.
+ } else {
+ DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(value);
+ __ push(ebx);
+ }
}
@@ -768,9 +777,8 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
}
@@ -806,9 +814,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
+ int args = FLAG_vector_stores ? 5 : 3;
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
}
diff --git a/deps/v8/src/ic/x87/stub-cache-x87.cc b/deps/v8/src/ic/x87/stub-cache-x87.cc
index dfbba47e3f..d76d0a26b7 100644
--- a/deps/v8/src/ic/x87/stub-cache-x87.cc
+++ b/deps/v8/src/ic/x87/stub-cache-x87.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
@@ -18,7 +16,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags,
StubCache::Table table, Register name, Register receiver,
// Number of the cache entry pointer-size scaled.
Register offset, Register extra) {
@@ -65,8 +63,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ pop(LoadDescriptor::SlotRegister());
}
- if (leave_frame) __ leave();
-
// Jump to the first instruction in the code stub.
__ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(extra);
@@ -120,8 +116,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ pop(slot);
}
- if (leave_frame) __ leave();
-
// Jump to the first instruction in the code stub.
__ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(offset);
@@ -134,10 +128,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, bool leave_frame,
- Register receiver, Register name,
- Register scratch, Register extra, Register extra2,
- Register extra3) {
+ Code::Flags flags, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2, Register extra3) {
Label miss;
// Assert that code is valid. The multiplying code relies on the entry size
@@ -180,8 +173,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
DCHECK(kCacheIndexShift == kPointerSizeLog2);
// Probe the primary table.
- ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kPrimary, name,
- receiver, offset, extra);
+ ProbeTable(isolate(), masm, ic_kind, flags, kPrimary, name, receiver, offset,
+ extra);
// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
@@ -193,8 +186,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
// Probe the secondary table.
- ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kSecondary, name,
- receiver, offset, extra);
+ ProbeTable(isolate(), masm, ic_kind, flags, kSecondary, name, receiver,
+ offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index fa41eb0036..da9eb2991f 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -100,18 +100,50 @@ void StoreDescriptor::InitializePlatformSpecific(
}
-void StoreTransitionDescriptor::InitializePlatformSpecific(
+Type::FunctionType*
+StoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int paramater_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 4, isolate->interface_descriptor_zone());
+ function->InitParameter(0, AnyTagged()); // Receiver
+ function->InitParameter(1, AnyTagged()); // Name
+ function->InitParameter(2, AnyTagged()); // Value
+ function->InitParameter(3, AnyTagged()); // Map
+ return function;
+}
+
+
+Type::FunctionType*
+LoadGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int paramater_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 1, isolate->interface_descriptor_zone());
+ function->InitParameter(0, UntaggedSigned32());
+ return function;
+}
+
+
+void LoadGlobalViaContextDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- MapRegister()};
+ Register registers[] = {SlotRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void ElementTransitionAndStoreDescriptor::InitializePlatformSpecific(
+Type::FunctionType*
+StoreGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int paramater_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 2, isolate->interface_descriptor_zone());
+ function->InitParameter(0, UntaggedSigned32());
+ function->InitParameter(1, AnyTagged());
+ return function;
+}
+
+
+void StoreGlobalViaContextDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ValueRegister(), MapRegister(), NameRegister(),
- ReceiverRegister()};
+ Register registers[] = {SlotRegister(), ValueRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -123,6 +155,13 @@ void InstanceofDescriptor::InitializePlatformSpecific(
}
+void ToObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void MathPowTaggedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {exponent()};
@@ -359,16 +398,30 @@ ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
}
-Type::FunctionType*
-MathRoundVariantDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+Type::FunctionType* MathRoundVariantCallFromUnoptimizedCodeDescriptor::
+ BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
+ int paramater_count) {
Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 2, isolate->interface_descriptor_zone());
- function->InitParameter(0, SmiType());
- function->InitParameter(1, AnyTagged());
+ AnyTagged(), Type::Undefined(), 4, isolate->interface_descriptor_zone());
+ function->InitParameter(0, Type::Receiver());
+ function->InitParameter(1, SmiType());
+ function->InitParameter(2, AnyTagged());
+ function->InitParameter(3, AnyTagged());
return function;
}
+Type::FunctionType* MathRoundVariantCallFromOptimizedCodeDescriptor::
+ BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
+ int paramater_count) {
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(), Type::Undefined(), 5, isolate->interface_descriptor_zone());
+ function->InitParameter(0, Type::Receiver());
+ function->InitParameter(1, SmiType());
+ function->InitParameter(2, AnyTagged());
+ function->InitParameter(3, AnyTagged());
+ function->InitParameter(4, AnyTagged());
+ return function;
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index f206b55841..a016797623 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -17,7 +17,6 @@ class PlatformInterfaceDescriptor;
V(Load) \
V(Store) \
V(StoreTransition) \
- V(ElementTransitionAndStore) \
V(VectorStoreICTrampoline) \
V(VectorStoreIC) \
V(Instanceof) \
@@ -25,6 +24,7 @@ class PlatformInterfaceDescriptor;
V(FastNewClosure) \
V(FastNewContext) \
V(ToNumber) \
+ V(ToObject) \
V(NumberToString) \
V(Typeof) \
V(FastCloneShallowArray) \
@@ -57,27 +57,26 @@ class PlatformInterfaceDescriptor;
V(ApiGetter) \
V(ArgumentsAccessRead) \
V(StoreArrayLiteralElement) \
+ V(LoadGlobalViaContext) \
+ V(StoreGlobalViaContext) \
V(MathPowTagged) \
V(MathPowInteger) \
V(ContextOnly) \
V(GrowArrayElements) \
- V(MathRoundVariant)
+ V(MathRoundVariantCallFromUnoptimizedCode) \
+ V(MathRoundVariantCallFromOptimizedCode)
class CallInterfaceDescriptorData {
public:
CallInterfaceDescriptorData()
- : stack_paramater_count_(-1),
- register_param_count_(-1),
- function_type_(nullptr) {}
+ : register_param_count_(-1), function_type_(nullptr) {}
// A copy of the passed in registers and param_representations is made
// and owned by the CallInterfaceDescriptorData.
- void InitializePlatformIndependent(int stack_paramater_count,
- Type::FunctionType* function_type) {
+ void InitializePlatformIndependent(Type::FunctionType* function_type) {
function_type_ = function_type;
- stack_paramater_count_ = stack_paramater_count;
}
// TODO(mvstanton): Instead of taking parallel arrays register and
@@ -90,12 +89,11 @@ class CallInterfaceDescriptorData {
bool IsInitialized() const { return register_param_count_ >= 0; }
+ int param_count() const { return function_type_->Arity(); }
int register_param_count() const { return register_param_count_; }
Register register_param(int index) const { return register_params_[index]; }
Register* register_params() const { return register_params_.get(); }
- Type* register_param_type(int index) const {
- return function_type_->Parameter(index);
- }
+ Type* param_type(int index) const { return function_type_->Parameter(index); }
PlatformInterfaceDescriptor* platform_specific_descriptor() const {
return platform_specific_descriptor_;
}
@@ -103,14 +101,13 @@ class CallInterfaceDescriptorData {
Type::FunctionType* function_type() const { return function_type_; }
private:
- int stack_paramater_count_;
int register_param_count_;
// The Register params are allocated dynamically by the
// InterfaceDescriptor, and freed on destruction. This is because static
// arrays of Registers cause creation of runtime static initializers
// which we don't want.
- SmartArrayPointer<Register> register_params_;
+ base::SmartArrayPointer<Register> register_params_;
// Specifies types for parameters and return
Type::FunctionType* function_type_;
@@ -140,6 +137,8 @@ class CallInterfaceDescriptor {
CallInterfaceDescriptor(Isolate* isolate, CallDescriptors::Key key)
: data_(isolate->call_descriptor_data(key)) {}
+ int GetParameterCount() const { return data()->param_count(); }
+
int GetRegisterParameterCount() const {
return data()->register_param_count();
}
@@ -153,8 +152,8 @@ class CallInterfaceDescriptor {
}
Type* GetParameterType(int index) const {
- DCHECK(index < data()->register_param_count());
- return data()->register_param_type(index);
+ DCHECK(index < data()->param_count());
+ return data()->param_type(index);
}
// Some platforms have extra information to associate with the descriptor.
@@ -192,7 +191,7 @@ class CallInterfaceDescriptor {
Type::FunctionType* function_type =
BuildCallInterfaceDescriptorFunctionType(isolate,
d->register_param_count());
- d->InitializePlatformIndependent(0, function_type);
+ d->InitializePlatformIndependent(function_type);
}
}
@@ -252,7 +251,8 @@ class StoreDescriptor : public CallInterfaceDescriptor {
class StoreTransitionDescriptor : public StoreDescriptor {
public:
- DECLARE_DESCRIPTOR(StoreTransitionDescriptor, StoreDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreTransitionDescriptor,
+ StoreDescriptor)
// Extends StoreDescriptor with Map parameter.
enum ParameterIndices {
@@ -262,14 +262,8 @@ class StoreTransitionDescriptor : public StoreDescriptor {
kMapIndex,
kParameterCount
};
- static const Register MapRegister();
-};
-
-
-class ElementTransitionAndStoreDescriptor : public StoreDescriptor {
- public:
- DECLARE_DESCRIPTOR(ElementTransitionAndStoreDescriptor, StoreDescriptor)
+  // MapRegister() is no_reg on ia32; instead it's on the stack.
static const Register MapRegister();
};
@@ -346,6 +340,16 @@ class ToNumberDescriptor : public CallInterfaceDescriptor {
};
+class ToObjectDescriptor : public CallInterfaceDescriptor {
+ public:
+ enum ParameterIndices { kReceiverIndex };
+
+ DECLARE_DESCRIPTOR(ToObjectDescriptor, CallInterfaceDescriptor)
+
+ static const Register ReceiverRegister();
+};
+
+
class NumberToStringDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(NumberToStringDescriptor, CallInterfaceDescriptor)
@@ -425,6 +429,25 @@ class RegExpConstructResultDescriptor : public CallInterfaceDescriptor {
};
+class LoadGlobalViaContextDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadGlobalViaContextDescriptor,
+ CallInterfaceDescriptor)
+
+ static const Register SlotRegister();
+};
+
+
+class StoreGlobalViaContextDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreGlobalViaContextDescriptor,
+ CallInterfaceDescriptor)
+
+ static const Register SlotRegister();
+ static const Register ValueRegister();
+};
+
+
class TransitionElementsKindDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(TransitionElementsKindDescriptor, CallInterfaceDescriptor)
@@ -584,10 +607,20 @@ class MathPowIntegerDescriptor : public CallInterfaceDescriptor {
};
-class MathRoundVariantDescriptor : public CallInterfaceDescriptor {
+class MathRoundVariantCallFromOptimizedCodeDescriptor
+ : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(MathRoundVariantDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ MathRoundVariantCallFromOptimizedCodeDescriptor, CallInterfaceDescriptor)
+};
+
+
+class MathRoundVariantCallFromUnoptimizedCodeDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ MathRoundVariantCallFromUnoptimizedCodeDescriptor,
+ CallInterfaceDescriptor)
};
diff --git a/deps/v8/src/interpreter/DEPS b/deps/v8/src/interpreter/DEPS
new file mode 100644
index 0000000000..f6fc3f63d7
--- /dev/null
+++ b/deps/v8/src/interpreter/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "+src/compiler/interpreter-assembler.h",
+ "-src/v8.h",
+]
diff --git a/deps/v8/src/interpreter/OWNERS b/deps/v8/src/interpreter/OWNERS
new file mode 100644
index 0000000000..906a5ce641
--- /dev/null
+++ b/deps/v8/src/interpreter/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
new file mode 100644
index 0000000000..24fec96bfa
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -0,0 +1,222 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate)
+ : isolate_(isolate),
+ bytecode_generated_(false),
+ local_register_count_(-1),
+ temporary_register_count_(0),
+ temporary_register_next_(0) {}
+
+
+void BytecodeArrayBuilder::set_locals_count(int number_of_locals) {
+ local_register_count_ = number_of_locals;
+ temporary_register_next_ = local_register_count_;
+}
+
+
+int BytecodeArrayBuilder::locals_count() const { return local_register_count_; }
+
+
+Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
+ DCHECK_EQ(bytecode_generated_, false);
+ DCHECK_GE(local_register_count_, 0);
+ int bytecode_size = static_cast<int>(bytecodes_.size());
+ int register_count = local_register_count_ + temporary_register_count_;
+ int frame_size = register_count * kPointerSize;
+ Handle<BytecodeArray> output = isolate_->factory()->NewBytecodeArray(
+ bytecode_size, &bytecodes_.front(), frame_size);
+ bytecode_generated_ = true;
+ return output;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value binop,
+ Register reg) {
+ Output(BytecodeForBinaryOperation(binop), reg.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
+ v8::internal::Smi* smi) {
+ int32_t raw_smi = smi->value();
+ if (raw_smi == 0) {
+ Output(Bytecode::kLdaZero);
+ } else if (raw_smi >= -128 && raw_smi <= 127) {
+ Output(Bytecode::kLdaSmi8, static_cast<uint8_t>(raw_smi));
+ } else {
+ // TODO(oth): Put Smi in constant pool.
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadUndefined() {
+ Output(Bytecode::kLdaUndefined);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNull() {
+ Output(Bytecode::kLdaNull);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTheHole() {
+ Output(Bytecode::kLdaTheHole);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTrue() {
+ Output(Bytecode::kLdaTrue);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
+ Output(Bytecode::kLdaFalse);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
+ Register reg) {
+ Output(Bytecode::kLdar, reg.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
+ Register reg) {
+ Output(Bytecode::kStar, reg.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
+ Output(Bytecode::kReturn);
+ return *this;
+}
+
+
+int BytecodeArrayBuilder::BorrowTemporaryRegister() {
+ DCHECK_GE(local_register_count_, 0);
+ int temporary_reg_index = temporary_register_next_++;
+ int count = temporary_register_next_ - local_register_count_;
+ if (count > temporary_register_count_) {
+ temporary_register_count_ = count;
+ }
+ return temporary_reg_index;
+}
+
+
+void BytecodeArrayBuilder::ReturnTemporaryRegister(int reg_index) {
+ DCHECK_EQ(reg_index, temporary_register_next_ - 1);
+ temporary_register_next_ = reg_index;
+}
+
+
+bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
+ uint8_t operand_value) const {
+ OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
+ switch (operand_type) {
+ case OperandType::kNone:
+ return false;
+ case OperandType::kImm8:
+ return true;
+ case OperandType::kReg:
+ return Register::FromOperand(operand_value).index() <
+ temporary_register_next_;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0,
+ uint8_t operand1, uint8_t operand2) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
+ DCHECK(OperandIsValid(bytecode, 0, operand0) &&
+ OperandIsValid(bytecode, 1, operand1) &&
+ OperandIsValid(bytecode, 2, operand2));
+ bytecodes_.push_back(Bytecodes::ToByte(bytecode));
+ bytecodes_.push_back(operand0);
+ bytecodes_.push_back(operand1);
+ bytecodes_.push_back(operand2);
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0,
+ uint8_t operand1) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
+ DCHECK(OperandIsValid(bytecode, 0, operand0) &&
+ OperandIsValid(bytecode, 1, operand1));
+ bytecodes_.push_back(Bytecodes::ToByte(bytecode));
+ bytecodes_.push_back(operand0);
+ bytecodes_.push_back(operand1);
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
+ DCHECK(OperandIsValid(bytecode, 0, operand0));
+ bytecodes_.push_back(Bytecodes::ToByte(bytecode));
+ bytecodes_.push_back(operand0);
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
+ bytecodes_.push_back(Bytecodes::ToByte(bytecode));
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
+ switch (op) {
+ case Token::Value::ADD:
+ return Bytecode::kAdd;
+ case Token::Value::SUB:
+ return Bytecode::kSub;
+ case Token::Value::MUL:
+ return Bytecode::kMul;
+ case Token::Value::DIV:
+ return Bytecode::kDiv;
+ default:
+ UNIMPLEMENTED();
+ return static_cast<Bytecode>(-1);
+ }
+}
+
+
+TemporaryRegisterScope::TemporaryRegisterScope(BytecodeArrayBuilder* builder)
+ : builder_(builder), count_(0), last_register_index_(-1) {}
+
+
+TemporaryRegisterScope::~TemporaryRegisterScope() {
+ while (count_-- != 0) {
+ builder_->ReturnTemporaryRegister(last_register_index_--);
+ }
+}
+
+
+Register TemporaryRegisterScope::NewRegister() {
+ count_++;
+ last_register_index_ = builder_->BorrowTemporaryRegister();
+ return Register(last_register_index_);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
new file mode 100644
index 0000000000..c4ab816665
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -0,0 +1,122 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
+#define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
+
+#include <vector>
+
+#include "src/ast.h"
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+namespace interpreter {
+
+class Register;
+
+class BytecodeArrayBuilder {
+ public:
+ explicit BytecodeArrayBuilder(Isolate* isolate);
+ Handle<BytecodeArray> ToBytecodeArray();
+
+ // Set number of locals required for bytecode array.
+ void set_locals_count(int number_of_locals);
+ int locals_count() const;
+
+ // Constant loads to accumulator.
+ BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
+ BytecodeArrayBuilder& LoadUndefined();
+ BytecodeArrayBuilder& LoadNull();
+ BytecodeArrayBuilder& LoadTheHole();
+ BytecodeArrayBuilder& LoadTrue();
+ BytecodeArrayBuilder& LoadFalse();
+
+ // Register-accumulator transfers.
+ BytecodeArrayBuilder& LoadAccumulatorWithRegister(Register reg);
+ BytecodeArrayBuilder& StoreAccumulatorInRegister(Register reg);
+
+ // Operators.
+ BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg);
+
+ // Flow Control.
+ BytecodeArrayBuilder& Return();
+
+ private:
+ static Bytecode BytecodeForBinaryOperation(Token::Value op);
+
+ void Output(Bytecode bytecode, uint8_t r0, uint8_t r1, uint8_t r2);
+ void Output(Bytecode bytecode, uint8_t r0, uint8_t r1);
+ void Output(Bytecode bytecode, uint8_t r0);
+ void Output(Bytecode bytecode);
+
+ bool OperandIsValid(Bytecode bytecode, int operand_index,
+ uint8_t operand_value) const;
+
+ int BorrowTemporaryRegister();
+ void ReturnTemporaryRegister(int reg_index);
+
+ Isolate* isolate_;
+ std::vector<uint8_t> bytecodes_;
+ bool bytecode_generated_;
+
+ int local_register_count_;
+ int temporary_register_count_;
+ int temporary_register_next_;
+
+ friend class TemporaryRegisterScope;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArrayBuilder);
+};
+
+// An interpreter register which is located in the function's register file
+// in its stack-frame.
+class Register {
+ public:
+ static const int kMaxRegisterIndex = 128;
+
+ explicit Register(int index) : index_(index) {
+ DCHECK_LE(index_, kMaxRegisterIndex);
+ }
+
+ int index() { return index_; }
+ uint8_t ToOperand() { return static_cast<uint8_t>(-index_); }
+ static Register FromOperand(uint8_t operand) {
+ return Register(-static_cast<int8_t>(operand));
+ }
+
+ private:
+ void* operator new(size_t size);
+ void operator delete(void* p);
+
+ int index_;
+};
+
+// A stack-allocated class that allows the instantiator to allocate
+// temporary registers that are cleaned up when the scope is closed.
+class TemporaryRegisterScope {
+ public:
+ explicit TemporaryRegisterScope(BytecodeArrayBuilder* builder);
+ ~TemporaryRegisterScope();
+ Register NewRegister();
+
+ private:
+ void* operator new(size_t size);
+ void operator delete(void* p);
+
+ BytecodeArrayBuilder* builder_;
+ int count_;
+ int last_register_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterScope);
+};
+
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
new file mode 100644
index 0000000000..9cce681ad4
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -0,0 +1,370 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-generator.h"
+
+#include <stack>
+
+#include "src/compiler.h"
+#include "src/objects.h"
+#include "src/scopes.h"
+#include "src/token.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
+ : builder_(isolate) {
+ InitializeAstVisitor(isolate, zone);
+}
+
+
+BytecodeGenerator::~BytecodeGenerator() {}
+
+
+Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
+ set_scope(info->scope());
+
+  // This is a temporary guard (oth).
+ DCHECK(scope()->is_function_scope());
+
+ builder().set_locals_count(scope()->num_stack_slots());
+
+ // Visit implicit declaration of the function name.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ VisitVariableDeclaration(scope()->function());
+ }
+
+ // Visit declarations within the function scope.
+ VisitDeclarations(scope()->declarations());
+
+ // Visit statements in the function body.
+ VisitStatements(info->literal()->body());
+
+ set_scope(nullptr);
+ return builder_.ToBytecodeArray();
+}
+
+
+void BytecodeGenerator::VisitBlock(Block* node) {
+ if (node->scope() == NULL) {
+ // Visit statements in the same scope, no declarations.
+ VisitStatements(node->statements());
+ } else {
+ // Visit declarations and statements in a block scope.
+ if (node->scope()->ContextLocalCount() > 0) {
+ UNIMPLEMENTED();
+ } else {
+ VisitDeclarations(node->scope()->declarations());
+ VisitStatements(node->statements());
+ }
+ }
+}
+
+
+void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
+ Variable* variable = decl->proxy()->var();
+ switch (variable->location()) {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
+ UNIMPLEMENTED();
+ break;
+ case VariableLocation::PARAMETER:
+ UNIMPLEMENTED();
+ break;
+ case VariableLocation::LOCAL:
+ // Details stored in scope, i.e. variable index.
+ break;
+ case VariableLocation::CONTEXT:
+ case VariableLocation::LOOKUP:
+ UNIMPLEMENTED();
+ break;
+ }
+}
+
+
+void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitImportDeclaration(ImportDeclaration* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitExportDeclaration(ExportDeclaration* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitIfStatement(IfStatement* node) { UNIMPLEMENTED(); }
+
+
+void BytecodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitBreakStatement(BreakStatement* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+ Visit(node->expression());
+ builder().Return();
+}
+
+
+void BytecodeGenerator::VisitWithStatement(WithStatement* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitCaseClause(CaseClause* clause) { UNIMPLEMENTED(); }
+
+
+void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitWhileStatement(WhileStatement* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitForStatement(ForStatement* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitForInStatement(ForInStatement* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitForOfStatement(ForOfStatement* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitClassLiteral(ClassLiteral* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitConditional(Conditional* node) { UNIMPLEMENTED(); }
+
+
+void BytecodeGenerator::VisitLiteral(Literal* expr) {
+ if (expr->IsPropertyName()) {
+ UNIMPLEMENTED();
+ }
+
+ Handle<Object> value = expr->value();
+ if (value->IsSmi()) {
+ builder().LoadLiteral(Smi::cast(*value));
+ } else if (value->IsUndefined()) {
+ builder().LoadUndefined();
+ } else if (value->IsTrue()) {
+ builder().LoadTrue();
+ } else if (value->IsFalse()) {
+ builder().LoadFalse();
+ } else if (value->IsNull()) {
+ builder().LoadNull();
+ } else if (value->IsTheHole()) {
+ builder().LoadTheHole();
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+
+void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case VariableLocation::LOCAL: {
+ Register source(variable->index());
+ builder().LoadAccumulatorWithRegister(source);
+ break;
+ }
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::CONTEXT:
+ case VariableLocation::LOOKUP:
+ UNIMPLEMENTED();
+ }
+}
+
+
+void BytecodeGenerator::VisitAssignment(Assignment* expr) {
+ DCHECK(expr->target()->IsValidReferenceExpression());
+
+ // Left-hand side can only be a property, a global or a variable slot.
+ Property* property = expr->target()->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(property);
+
+ DCHECK(!expr->is_compound());
+ Visit(expr->value());
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* variable = expr->target()->AsVariableProxy()->var();
+ DCHECK(variable->location() == VariableLocation::LOCAL);
+ Register destination(variable->index());
+ builder().StoreAccumulatorInRegister(destination);
+ break;
+ }
+ case NAMED_PROPERTY:
+ case KEYED_PROPERTY:
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNIMPLEMENTED();
+ }
+}
+
+
+void BytecodeGenerator::VisitYield(Yield* node) { UNIMPLEMENTED(); }
+
+
+void BytecodeGenerator::VisitThrow(Throw* node) { UNIMPLEMENTED(); }
+
+
+void BytecodeGenerator::VisitProperty(Property* node) { UNIMPLEMENTED(); }
+
+
+void BytecodeGenerator::VisitCall(Call* node) { UNIMPLEMENTED(); }
+
+
+void BytecodeGenerator::VisitCallNew(CallNew* node) { UNIMPLEMENTED(); }
+
+
+void BytecodeGenerator::VisitCallRuntime(CallRuntime* node) { UNIMPLEMENTED(); }
+
+
+void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitCountOperation(CountOperation* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
+ switch (binop->op()) {
+ case Token::COMMA:
+ case Token::OR:
+ case Token::AND:
+ UNIMPLEMENTED();
+ break;
+ default:
+ VisitArithmeticExpression(binop);
+ break;
+ }
+}
+
+
+void BytecodeGenerator::VisitCompareOperation(CompareOperation* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitSpread(Spread* node) { UNIMPLEMENTED(); }
+
+
+void BytecodeGenerator::VisitThisFunction(ThisFunction* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitSuperPropertyReference(
+ SuperPropertyReference* node) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* binop) {
+ Token::Value op = binop->op();
+ Expression* left = binop->left();
+ Expression* right = binop->right();
+
+ TemporaryRegisterScope temporary_register_scope(&builder_);
+ Register temporary = temporary_register_scope.NewRegister();
+
+ Visit(left);
+ builder().StoreAccumulatorInRegister(temporary);
+ Visit(right);
+ builder().BinaryOperation(op, temporary);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
new file mode 100644
index 0000000000..5caf3f1813
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -0,0 +1,44 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_GENERATOR_H_
+#define V8_INTERPRETER_BYTECODE_GENERATOR_H_
+
+#include "src/ast.h"
+#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeGenerator : public AstVisitor {
+ public:
+ BytecodeGenerator(Isolate* isolate, Zone* zone);
+ virtual ~BytecodeGenerator();
+
+ Handle<BytecodeArray> MakeBytecode(CompilationInfo* info);
+
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ private:
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ void VisitArithmeticExpression(BinaryOperation* binop);
+
+ inline BytecodeArrayBuilder& builder() { return builder_; }
+ inline Scope* scope() const { return scope_; }
+ inline void set_scope(Scope* s) { scope_ = s; }
+
+ BytecodeArrayBuilder builder_;
+ Scope* scope_;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_GENERATOR_H_
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
new file mode 100644
index 0000000000..8232b657e7
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -0,0 +1,158 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecodes.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// Maximum number of operands a bytecode may have.
+static const int kMaxOperands = 3;
+
+// kBytecodeTable relies on kNone being the same as zero to detect length.
+STATIC_ASSERT(static_cast<int>(OperandType::kNone) == 0);
+
+static const OperandType kBytecodeTable[][kMaxOperands] = {
+#define DECLARE_OPERAND(_, ...) \
+ { __VA_ARGS__ } \
+ ,
+ BYTECODE_LIST(DECLARE_OPERAND)
+#undef DECLARE_OPERAND
+};
+
+
+// static
+const char* Bytecodes::ToString(Bytecode bytecode) {
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return #Name;
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return "";
+}
+
+
+// static
+const char* Bytecodes::OperandTypeToString(OperandType operand_type) {
+ switch (operand_type) {
+#define CASE(Name) \
+ case OperandType::k##Name: \
+ return #Name;
+ OPERAND_TYPE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return "";
+}
+
+
+// static
+uint8_t Bytecodes::ToByte(Bytecode bytecode) {
+ return static_cast<uint8_t>(bytecode);
+}
+
+
+// static
+Bytecode Bytecodes::FromByte(uint8_t value) {
+ Bytecode bytecode = static_cast<Bytecode>(value);
+ DCHECK(bytecode <= Bytecode::kLast);
+ return bytecode;
+}
+
+
+// static
+int Bytecodes::NumberOfOperands(Bytecode bytecode) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ int count;
+ uint8_t row = ToByte(bytecode);
+ for (count = 0; count < kMaxOperands; count++) {
+ if (kBytecodeTable[row][count] == OperandType::kNone) {
+ break;
+ }
+ }
+ return count;
+}
+
+
+// static
+OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
+ DCHECK(bytecode <= Bytecode::kLast && i < NumberOfOperands(bytecode));
+ return kBytecodeTable[ToByte(bytecode)][i];
+}
+
+
+// static
+int Bytecodes::Size(Bytecode bytecode) {
+ return 1 + NumberOfOperands(bytecode);
+}
+
+
+// static
+int Bytecodes::MaximumNumberOfOperands() { return kMaxOperands; }
+
+
+// static
+int Bytecodes::MaximumSize() { return 1 + kMaxOperands; }
+
+
+// static
+std::ostream& Bytecodes::Decode(std::ostream& os,
+ const uint8_t* bytecode_start) {
+ Vector<char> buf = Vector<char>::New(50);
+
+ Bytecode bytecode = Bytecodes::FromByte(bytecode_start[0]);
+ int bytecode_size = Bytecodes::Size(bytecode);
+
+ for (int i = 0; i < bytecode_size; i++) {
+ SNPrintF(buf, "%02x ", bytecode_start[i]);
+ os << buf.start();
+ }
+ for (int i = bytecode_size; i < Bytecodes::MaximumSize(); i++) {
+ os << " ";
+ }
+
+ os << bytecode << " ";
+
+ const uint8_t* operands_start = bytecode_start + 1;
+ int operands_size = bytecode_size - 1;
+ for (int i = 0; i < operands_size; i++) {
+ OperandType op_type = GetOperandType(bytecode, i);
+ uint8_t operand = operands_start[i];
+ switch (op_type) {
+ case interpreter::OperandType::kImm8:
+ os << "#" << static_cast<int>(operand);
+ break;
+ case interpreter::OperandType::kReg:
+ os << "r" << Register::FromOperand(operand).index();
+ break;
+ case interpreter::OperandType::kNone:
+ UNREACHABLE();
+ break;
+ }
+ if (i != operands_size - 1) {
+ os << ", ";
+ }
+ }
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode) {
+ return os << Bytecodes::ToString(bytecode);
+}
+
+
+std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
+ return os << Bytecodes::OperandTypeToString(operand_type);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
new file mode 100644
index 0000000000..fec6ecf6aa
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -0,0 +1,119 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODES_H_
+#define V8_INTERPRETER_BYTECODES_H_
+
+#include <iosfwd>
+
+// Clients of this interface shouldn't depend on lots of interpreter internals.
+// Do not include anything from src/interpreter here!
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// The list of operand types used by bytecodes.
+#define OPERAND_TYPE_LIST(V) \
+ V(None) \
+ V(Imm8) \
+ V(Reg)
+
+// The list of bytecodes which are interpreted by the interpreter.
+#define BYTECODE_LIST(V) \
+ \
+ /* Loading the accumulator */ \
+ V(LdaZero, OperandType::kNone) \
+ V(LdaSmi8, OperandType::kImm8) \
+ V(LdaUndefined, OperandType::kNone) \
+ V(LdaNull, OperandType::kNone) \
+ V(LdaTheHole, OperandType::kNone) \
+ V(LdaTrue, OperandType::kNone) \
+ V(LdaFalse, OperandType::kNone) \
+ \
+ /* Register-accumulator transfers */ \
+ V(Ldar, OperandType::kReg) \
+ V(Star, OperandType::kReg) \
+ \
+ /* Binary Operators */ \
+ V(Add, OperandType::kReg) \
+ V(Sub, OperandType::kReg) \
+ V(Mul, OperandType::kReg) \
+ V(Div, OperandType::kReg) \
+ \
+ /* Control Flow */ \
+ V(Return, OperandType::kNone)
+
+
+// Enumeration of operand types used by bytecodes.
+enum class OperandType : uint8_t {
+#define DECLARE_OPERAND_TYPE(Name) k##Name,
+ OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE)
+#undef DECLARE_OPERAND_TYPE
+#define COUNT_OPERAND_TYPES(x) +1
+ // The COUNT_OPERAND macro will turn this into kLast = -1 +1 +1... which will
+ // evaluate to the same value as the last operand.
+ kLast = -1 OPERAND_TYPE_LIST(COUNT_OPERAND_TYPES)
+#undef COUNT_OPERAND_TYPES
+};
+
+
+// Enumeration of interpreter bytecodes.
+enum class Bytecode : uint8_t {
+#define DECLARE_BYTECODE(Name, ...) k##Name,
+ BYTECODE_LIST(DECLARE_BYTECODE)
+#undef DECLARE_BYTECODE
+#define COUNT_BYTECODE(x, ...) +1
+ // The COUNT_BYTECODE macro will turn this into kLast = -1 +1 +1... which will
+ // evaluate to the same value as the last real bytecode.
+ kLast = -1 BYTECODE_LIST(COUNT_BYTECODE)
+#undef COUNT_BYTECODE
+};
+
+
+class Bytecodes {
+ public:
+ // Returns string representation of |bytecode|.
+ static const char* ToString(Bytecode bytecode);
+
+ // Returns string representation of |operand_type|.
+ static const char* OperandTypeToString(OperandType operand_type);
+
+ // Returns byte value of bytecode.
+ static uint8_t ToByte(Bytecode bytecode);
+
+ // Returns bytecode for |value|.
+ static Bytecode FromByte(uint8_t value);
+
+ // Returns the number of operands expected by |bytecode|.
+ static int NumberOfOperands(Bytecode bytecode);
+
+ // Return the i-th operand of |bytecode|.
+ static OperandType GetOperandType(Bytecode bytecode, int i);
+
+ // Returns the size of the bytecode including its operands.
+ static int Size(Bytecode bytecode);
+
+ // The maximum number of operands across all bytecodes.
+ static int MaximumNumberOfOperands();
+
+ // Maximum size of a bytecode and its operands.
+ static int MaximumSize();
+
+ // Decode a single bytecode and operands to |os|.
+ static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Bytecodes);
+};
+
+std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
+std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODES_H_
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
new file mode 100644
index 0000000000..565fa0c443
--- /dev/null
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -0,0 +1,231 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/interpreter.h"
+
+#include "src/compiler.h"
+#include "src/compiler/interpreter-assembler.h"
+#include "src/factory.h"
+#include "src/interpreter/bytecode-generator.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+using compiler::Node;
+#define __ assembler->
+
+
+Interpreter::Interpreter(Isolate* isolate)
+ : isolate_(isolate) {}
+
+
+// static
+Handle<FixedArray> Interpreter::CreateUninitializedInterpreterTable(
+ Isolate* isolate) {
+ Handle<FixedArray> handler_table = isolate->factory()->NewFixedArray(
+ static_cast<int>(Bytecode::kLast) + 1, TENURED);
+ // We rely on the interpreter handler table being immovable, so check that
+ // it was allocated on the first page (which is always immovable).
+ DCHECK(isolate->heap()->old_space()->FirstPage()->Contains(
+ handler_table->address()));
+ return handler_table;
+}
+
+
+void Interpreter::Initialize() {
+ DCHECK(FLAG_ignition);
+ Handle<FixedArray> handler_table = isolate_->factory()->interpreter_table();
+ if (!IsInterpreterTableInitialized(handler_table)) {
+ Zone zone;
+ HandleScope scope(isolate_);
+
+#define GENERATE_CODE(Name, ...) \
+ { \
+ compiler::InterpreterAssembler assembler(isolate_, &zone, \
+ Bytecode::k##Name); \
+ Do##Name(&assembler); \
+ Handle<Code> code = assembler.GenerateCode(); \
+ handler_table->set(static_cast<int>(Bytecode::k##Name), *code); \
+ }
+ BYTECODE_LIST(GENERATE_CODE)
+#undef GENERATE_CODE
+ }
+}
+
+
+bool Interpreter::MakeBytecode(CompilationInfo* info) {
+ Handle<SharedFunctionInfo> shared_info = info->shared_info();
+
+ BytecodeGenerator generator(info->isolate(), info->zone());
+ Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
+ if (FLAG_print_bytecode) {
+ bytecodes->Print();
+ }
+
+ DCHECK(shared_info->function_data()->IsUndefined());
+ if (!shared_info->function_data()->IsUndefined()) {
+ return false;
+ }
+
+ shared_info->set_function_data(*bytecodes);
+ info->SetCode(info->isolate()->builtins()->InterpreterEntryTrampoline());
+ info->EnsureFeedbackVector();
+ return true;
+}
+
+
+bool Interpreter::IsInterpreterTableInitialized(
+ Handle<FixedArray> handler_table) {
+ DCHECK(handler_table->length() == static_cast<int>(Bytecode::kLast) + 1);
+ return handler_table->get(0) != isolate_->heap()->undefined_value();
+}
+
+
+// LdaZero
+//
+// Load literal '0' into the accumulator.
+void Interpreter::DoLdaZero(compiler::InterpreterAssembler* assembler) {
+ Node* zero_value = __ NumberConstant(0.0);
+ __ SetAccumulator(zero_value);
+ __ Dispatch();
+}
+
+
+// LdaSmi8 <imm8>
+//
+// Load an 8-bit integer literal into the accumulator as a Smi.
+void Interpreter::DoLdaSmi8(compiler::InterpreterAssembler* assembler) {
+ Node* raw_int = __ BytecodeOperandImm8(0);
+ Node* smi_int = __ SmiTag(raw_int);
+ __ SetAccumulator(smi_int);
+ __ Dispatch();
+}
+
+
+// LdaUndefined
+//
+// Load Undefined into the accumulator.
+void Interpreter::DoLdaUndefined(compiler::InterpreterAssembler* assembler) {
+ Node* undefined_value = __ HeapConstant(Unique<HeapObject>::CreateImmovable(
+ isolate_->factory()->undefined_value()));
+ __ SetAccumulator(undefined_value);
+ __ Dispatch();
+}
+
+
+// LdaNull
+//
+// Load Null into the accumulator.
+void Interpreter::DoLdaNull(compiler::InterpreterAssembler* assembler) {
+ Node* null_value = __ HeapConstant(
+ Unique<HeapObject>::CreateImmovable(isolate_->factory()->null_value()));
+ __ SetAccumulator(null_value);
+ __ Dispatch();
+}
+
+
+// LdaTheHole
+//
+// Load TheHole into the accumulator.
+void Interpreter::DoLdaTheHole(compiler::InterpreterAssembler* assembler) {
+ Node* the_hole_value = __ HeapConstant(Unique<HeapObject>::CreateImmovable(
+ isolate_->factory()->the_hole_value()));
+ __ SetAccumulator(the_hole_value);
+ __ Dispatch();
+}
+
+
+// LdaTrue
+//
+// Load True into the accumulator.
+void Interpreter::DoLdaTrue(compiler::InterpreterAssembler* assembler) {
+ Node* true_value = __ HeapConstant(
+ Unique<HeapObject>::CreateImmovable(isolate_->factory()->true_value()));
+ __ SetAccumulator(true_value);
+ __ Dispatch();
+}
+
+
+// LdaFalse
+//
+// Load False into the accumulator.
+void Interpreter::DoLdaFalse(compiler::InterpreterAssembler* assembler) {
+ Node* false_value = __ HeapConstant(
+ Unique<HeapObject>::CreateImmovable(isolate_->factory()->false_value()));
+ __ SetAccumulator(false_value);
+ __ Dispatch();
+}
+
+
+// Ldar <src>
+//
+// Load accumulator with value from register <src>.
+void Interpreter::DoLdar(compiler::InterpreterAssembler* assembler) {
+ Node* value = __ LoadRegister(__ BytecodeOperandReg(0));
+ __ SetAccumulator(value);
+ __ Dispatch();
+}
+
+
+// Star <dst>
+//
+// Store accumulator to register <dst>.
+void Interpreter::DoStar(compiler::InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* accumulator = __ GetAccumulator();
+ __ StoreRegister(accumulator, reg_index);
+ __ Dispatch();
+}
+
+
+// Add <src>
+//
+// Add register <src> to accumulator.
+void Interpreter::DoAdd(compiler::InterpreterAssembler* assembler) {
+ // TODO(rmcilroy) Implement.
+ __ Dispatch();
+}
+
+
+// Sub <src>
+//
+// Subtract register <src> from accumulator.
+void Interpreter::DoSub(compiler::InterpreterAssembler* assembler) {
+ // TODO(rmcilroy) Implement.
+ __ Dispatch();
+}
+
+
+// Mul <src>
+//
+// Multiply accumulator by register <src>.
+void Interpreter::DoMul(compiler::InterpreterAssembler* assembler) {
+ // TODO(rmcilroy) Implement add register to accumulator.
+ __ Dispatch();
+}
+
+
+// Div <src>
+//
+// Divide register <src> by accumulator.
+void Interpreter::DoDiv(compiler::InterpreterAssembler* assembler) {
+ // TODO(rmcilroy) Implement.
+ __ Dispatch();
+}
+
+
+// Return
+//
+// Return the value in register 0.
+void Interpreter::DoReturn(compiler::InterpreterAssembler* assembler) {
+ __ Return();
+}
+
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
new file mode 100644
index 0000000000..64101de657
--- /dev/null
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -0,0 +1,60 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_INTERPRETER_H_
+#define V8_INTERPRETER_INTERPRETER_H_
+
+// Clients of this interface shouldn't depend on lots of interpreter internals.
+// Do not include anything from src/interpreter other than
+// src/interpreter/bytecodes.h here!
+#include "src/base/macros.h"
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class CompilationInfo;
+
+namespace compiler {
+class InterpreterAssembler;
+}
+
+namespace interpreter {
+
+class Interpreter {
+ public:
+ explicit Interpreter(Isolate* isolate);
+ virtual ~Interpreter() {}
+
+ // Creates an uninitialized interpreter handler table, where each handler
+ // points to the Illegal builtin.
+ static Handle<FixedArray> CreateUninitializedInterpreterTable(
+ Isolate* isolate);
+
+ // Initializes the interpreter.
+ void Initialize();
+
+ // Generate bytecode for |info|.
+ static bool MakeBytecode(CompilationInfo* info);
+
+ private:
+// Bytecode handler generator functions.
+#define DECLARE_BYTECODE_HANDLER_GENERATOR(Name, ...) \
+ void Do##Name(compiler::InterpreterAssembler* assembler);
+ BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
+#undef DECLARE_BYTECODE_HANDLER_GENERATOR
+
+ bool IsInterpreterTableInitialized(Handle<FixedArray> handler_table);
+
+ Isolate* isolate_;
+
+ DISALLOW_COPY_AND_ASSIGN(Interpreter);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_INTERPRETER_H_
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 2c83faac4f..155e9ead08 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -19,17 +19,18 @@
#include "src/compilation-cache.h"
#include "src/compilation-statistics.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
-#include "src/heap/spaces.h"
+#include "src/frames-inl.h"
#include "src/heap-profiler.h"
#include "src/hydrogen.h"
#include "src/ic/stub-cache.h"
+#include "src/interpreter/interpreter.h"
#include "src/lithium-allocator.h"
#include "src/log.h"
#include "src/messages.h"
#include "src/prototype.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/regexp-stack.h"
#include "src/runtime-profiler.h"
#include "src/sampler.h"
#include "src/scopeinfo.h"
@@ -331,15 +332,12 @@ static bool IsVisibleInStackTrace(JSFunction* fun,
Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
Handle<Object> caller) {
// Get stack trace limit.
- Handle<Object> error = Object::GetProperty(
- this, js_builtins_object(), "$Error").ToHandleChecked();
- if (!error->IsJSObject()) return factory()->undefined_value();
-
+ Handle<JSObject> error = error_function();
Handle<String> stackTraceLimit =
factory()->InternalizeUtf8String("stackTraceLimit");
DCHECK(!stackTraceLimit.is_null());
- Handle<Object> stack_trace_limit = JSReceiver::GetDataProperty(
- Handle<JSObject>::cast(error), stackTraceLimit);
+ Handle<Object> stack_trace_limit =
+ JSReceiver::GetDataProperty(error, stackTraceLimit);
if (!stack_trace_limit->IsNumber()) return factory()->undefined_value();
int limit = FastD2IChecked(stack_trace_limit->Number());
limit = Max(limit, 0); // Ensure that limit is not negative.
@@ -847,17 +845,19 @@ Object* Isolate::StackOverflow() {
// At this point we cannot create an Error object using its javascript
// constructor. Instead, we copy the pre-constructed boilerplate and
// attach the stack trace as a hidden property.
- Handle<String> key = factory()->stack_overflow_string();
- Handle<Object> boilerplate =
- Object::GetProperty(js_builtins_object(), key).ToHandleChecked();
- if (boilerplate->IsUndefined()) {
- return Throw(heap()->undefined_value(), nullptr);
- }
- Handle<JSObject> exception =
- factory()->CopyJSObject(Handle<JSObject>::cast(boilerplate));
+ Handle<Object> exception;
+ if (bootstrapper()->IsActive()) {
+ // There is no boilerplate to use during bootstrapping.
+ exception = factory()->NewStringFromAsciiChecked(
+ MessageTemplate::TemplateString(MessageTemplate::kStackOverflow));
+ } else {
+ Handle<JSObject> boilerplate = stack_overflow_boilerplate();
+ Handle<JSObject> copy = factory()->CopyJSObject(boilerplate);
+ CaptureAndSetSimpleStackTrace(copy, factory()->undefined_value());
+ exception = copy;
+ }
Throw(*exception, nullptr);
- CaptureAndSetSimpleStackTrace(exception, factory()->undefined_value());
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && FLAG_stress_compaction) {
heap()->CollectAllAvailableGarbage("trigger compaction");
@@ -1013,21 +1013,13 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
Handle<Object> message_obj = CreateMessage(exception_handle, location);
thread_local_top()->pending_message_obj_ = *message_obj;
- // For any exception not caught by JavaScript, even when an external
- // handler is present:
- // If the abort-on-uncaught-exception flag is specified, and if the
- // embedder didn't specify a custom uncaught exception callback,
- // or if the custom callback determined that V8 should abort, then
- // abort.
+ // If the abort-on-uncaught-exception flag is specified, abort on any
+ // exception not caught by JavaScript, even when an external handler is
+ // present. This flag is intended for use by JavaScript developers, so
+ // print a user-friendly stack trace (not an internal one).
if (FLAG_abort_on_uncaught_exception &&
- PredictExceptionCatcher() != CAUGHT_BY_JAVASCRIPT &&
- (!abort_on_uncaught_exception_callback_ ||
- abort_on_uncaught_exception_callback_(
- reinterpret_cast<v8::Isolate*>(this)))) {
- // Prevent endless recursion.
- FLAG_abort_on_uncaught_exception = false;
- // This flag is intended for use by JavaScript developers, so
- // print a user-friendly stack trace (not an internal one).
+ PredictExceptionCatcher() != CAUGHT_BY_JAVASCRIPT) {
+ FLAG_abort_on_uncaught_exception = false; // Prevent endless recursion.
PrintF(stderr, "%s\n\nFROM\n",
MessageHandler::GetLocalizedMessage(this, message_obj).get());
PrintCurrentStackTrace(stderr);
@@ -1351,12 +1343,7 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
// the Error object.
bool Isolate::IsErrorObject(Handle<Object> obj) {
if (!obj->IsJSObject()) return false;
-
- Handle<String> error_key =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("$Error"));
- Handle<Object> error_constructor = Object::GetProperty(
- js_builtins_object(), error_key).ToHandleChecked();
-
+ Handle<Object> error_constructor = error_function();
DisallowHeapAllocation no_gc;
for (PrototypeIterator iter(this, *obj, PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
@@ -1620,12 +1607,6 @@ void Isolate::SetCaptureStackTraceForUncaughtExceptions(
}
-void Isolate::SetAbortOnUncaughtExceptionCallback(
- v8::Isolate::AbortOnUncaughtExceptionCallback callback) {
- abort_on_uncaught_exception_callback_ = callback;
-}
-
-
Handle<Context> Isolate::native_context() {
return handle(context()->native_context());
}
@@ -1773,15 +1754,12 @@ Isolate::Isolate(bool enable_serializer)
eternal_handles_(NULL),
thread_manager_(NULL),
has_installed_extensions_(false),
- string_tracker_(NULL),
regexp_stack_(NULL),
date_cache_(NULL),
call_descriptor_data_(NULL),
// TODO(bmeurer) Initialized lazily because it depends on flags; can
// be fixed once the default isolate cleanup is done.
random_number_generator_(NULL),
- store_buffer_hash_set_1_address_(NULL),
- store_buffer_hash_set_2_address_(NULL),
serializer_enabled_(enable_serializer),
has_fatal_error_(false),
initialized_from_snapshot_(false),
@@ -1796,8 +1774,7 @@ Isolate::Isolate(bool enable_serializer)
next_unique_sfi_id_(0),
#endif
use_counter_callback_(NULL),
- basic_block_profiler_(NULL),
- abort_on_uncaught_exception_callback_(NULL) {
+ basic_block_profiler_(NULL) {
{
base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
CHECK(thread_data_table_);
@@ -1906,6 +1883,9 @@ void Isolate::Deinit() {
Sampler* sampler = logger_->sampler();
if (sampler && sampler->IsActive()) sampler->Stop();
+ delete interpreter_;
+ interpreter_ = NULL;
+
delete deoptimizer_data_;
deoptimizer_data_ = NULL;
builtins_.TearDown();
@@ -1919,6 +1899,11 @@ void Isolate::Deinit() {
delete basic_block_profiler_;
basic_block_profiler_ = NULL;
+ for (Cancelable* task : cancelable_tasks_) {
+ task->Cancel();
+ }
+ cancelable_tasks_.clear();
+
heap_.TearDown();
logger_->TearDown();
@@ -2001,9 +1986,6 @@ Isolate::~Isolate() {
delete thread_manager_;
thread_manager_ = NULL;
- delete string_tracker_;
- string_tracker_ = NULL;
-
delete memory_allocator_;
memory_allocator_ = NULL;
delete code_range_;
@@ -2110,8 +2092,6 @@ bool Isolate::Init(Deserializer* des) {
FOR_EACH_ISOLATE_ADDRESS_NAME(ASSIGN_ELEMENT)
#undef ASSIGN_ELEMENT
- string_tracker_ = new StringTracker();
- string_tracker_->isolate_ = this;
compilation_cache_ = new CompilationCache(this);
keyed_lookup_cache_ = new KeyedLookupCache();
context_slot_cache_ = new ContextSlotCache();
@@ -2131,6 +2111,7 @@ bool Isolate::Init(Deserializer* des) {
new CallInterfaceDescriptorData[CallDescriptors::NUMBER_OF_DESCRIPTORS];
cpu_profiler_ = new CpuProfiler(this);
heap_profiler_ = new HeapProfiler(heap());
+ interpreter_ = new interpreter::Interpreter(this);
// Enable logging before setting up the heap
logger_->SetUp(this);
@@ -2192,12 +2173,22 @@ bool Isolate::Init(Deserializer* des) {
// occur, clearing/updating ICs.
runtime_profiler_ = new RuntimeProfiler(this);
+ if (create_heap_objects) {
+ if (!bootstrapper_->CreateCodeStubContext(this)) {
+ return false;
+ }
+ }
+
// If we are deserializing, read the state into the now-empty heap.
if (!create_heap_objects) {
des->Deserialize(this);
}
stub_cache_->Initialize();
+ if (FLAG_ignition) {
+ interpreter_->Initialize();
+ }
+
// Finish initialization of ThreadLocal after deserialization is done.
clear_pending_exception();
clear_pending_message();
@@ -2446,7 +2437,9 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
return cell_reports_intact;
}
- if (initial_array_proto->elements() != heap()->empty_fixed_array()) {
+ FixedArrayBase* elements = initial_array_proto->elements();
+ if (elements != heap()->empty_fixed_array() &&
+ elements != heap()->empty_slow_element_dictionary()) {
DCHECK_EQ(false, cell_reports_intact);
return cell_reports_intact;
}
@@ -2457,7 +2450,10 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
DCHECK_EQ(false, cell_reports_intact);
return cell_reports_intact;
}
- if (initial_object_proto->elements() != heap()->empty_fixed_array()) {
+
+ elements = initial_object_proto->elements();
+ if (elements != heap()->empty_fixed_array() &&
+ elements != heap()->empty_slow_element_dictionary()) {
DCHECK_EQ(false, cell_reports_intact);
return cell_reports_intact;
}
@@ -2633,7 +2629,7 @@ void Isolate::EnqueueMicrotask(Handle<Object> microtask) {
queue = factory()->NewFixedArray(8);
heap()->set_microtask_queue(*queue);
} else if (num_tasks == queue->length()) {
- queue = FixedArray::CopySize(queue, num_tasks * 2);
+ queue = factory()->CopyFixedArrayAndGrow(queue, num_tasks);
heap()->set_microtask_queue(*queue);
}
DCHECK(queue->get(num_tasks)->IsUndefined());
@@ -2643,13 +2639,6 @@ void Isolate::EnqueueMicrotask(Handle<Object> microtask) {
void Isolate::RunMicrotasks() {
- // %RunMicrotasks may be called in mjsunit tests, which violates
- // this assertion, hence the check for --allow-natives-syntax.
- // TODO(adamk): However, this also fails some layout tests.
- //
- // DCHECK(FLAG_allow_natives_syntax ||
- // handle_scope_implementer()->CallDepthIsZero());
-
// Increase call depth to prevent recursive callbacks.
v8::Isolate::SuppressMicrotaskExecutionScope suppress(
reinterpret_cast<v8::Isolate*>(this));
@@ -2741,7 +2730,7 @@ void Isolate::AddDetachedContext(Handle<Context> context) {
Handle<WeakCell> cell = factory()->NewWeakCell(context);
Handle<FixedArray> detached_contexts(heap()->detached_contexts());
int length = detached_contexts->length();
- detached_contexts = FixedArray::CopySize(detached_contexts, length + 2);
+ detached_contexts = factory()->CopyFixedArrayAndGrow(detached_contexts, 2);
detached_contexts->set(length, Smi::FromInt(0));
detached_contexts->set(length + 1, *cell);
heap()->set_detached_contexts(*detached_contexts);
@@ -2787,6 +2776,18 @@ void Isolate::CheckDetachedContextsAfterGC() {
}
+void Isolate::RegisterCancelableTask(Cancelable* task) {
+ cancelable_tasks_.insert(task);
+}
+
+
+void Isolate::RemoveCancelableTask(Cancelable* task) {
+ auto removed = cancelable_tasks_.erase(task);
+ USE(removed);
+ DCHECK(removed == 1);
+}
+
+
bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 876a72f327..3cc4bacfde 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -6,21 +6,26 @@
#define V8_ISOLATE_H_
#include <queue>
+#include <set>
+
#include "include/v8-debug.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomicops.h"
#include "src/builtins.h"
+#include "src/cancelable-task.h"
#include "src/contexts.h"
#include "src/date.h"
#include "src/execution.h"
#include "src/frames.h"
+#include "src/futex-emulation.h"
#include "src/global-handles.h"
#include "src/handles.h"
#include "src/hashmap.h"
#include "src/heap/heap.h"
+#include "src/messages.h"
#include "src/optimizing-compile-dispatcher.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/regexp-stack.h"
#include "src/runtime/runtime.h"
#include "src/runtime-profiler.h"
#include "src/zone.h"
@@ -59,10 +64,12 @@ class HStatistics;
class HTracer;
class InlineRuntimeFunctionsTable;
class InnerPointerToCodeCache;
+class Logger;
class MaterializedObjectStore;
class CodeAgingHelper;
class RegExpStack;
class SaveContext;
+class StatsTable;
class StringTracker;
class StubCache;
class SweeperThread;
@@ -79,11 +86,13 @@ typedef void* ExternalReferenceRedirectorPointer();
class Debug;
-class Debugger;
class PromiseOnStack;
class Redirection;
class Simulator;
+namespace interpreter {
+class Interpreter;
+}
// Static indirection table for handles to constants. If a frame
// element represents a constant, the data contains an index into
@@ -378,7 +387,6 @@ typedef List<HeapObject*> DebugObjectCache;
V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
V(PromiseRejectCallback, promise_reject_callback, NULL) \
V(const v8::StartupData*, snapshot_blob, NULL) \
- V(bool, creating_default_snapshot, false) \
ISOLATE_INIT_SIMULATOR_LIST(V)
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
@@ -690,9 +698,6 @@ class Isolate {
int frame_limit,
StackTrace::StackTraceOptions options);
- void SetAbortOnUncaughtExceptionCallback(
- v8::Isolate::AbortOnUncaughtExceptionCallback callback);
-
enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
void PrintCurrentStackTrace(FILE* out);
void PrintStack(StringStream* accumulator,
@@ -905,8 +910,6 @@ class Isolate {
ThreadManager* thread_manager() { return thread_manager_; }
- StringTracker* string_tracker() { return string_tracker_; }
-
unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
return &jsregexp_uncanonicalize_;
}
@@ -994,6 +997,10 @@ class Isolate {
date_cache_ = date_cache;
}
+ ErrorToStringHelper* error_tostring_helper() {
+ return &error_tostring_helper_;
+ }
+
Map* get_initial_js_array_map(ElementsKind kind,
Strength strength = Strength::WEAK);
@@ -1103,23 +1110,6 @@ class Isolate {
int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
#endif
- void set_store_buffer_hash_set_1_address(
- uintptr_t* store_buffer_hash_set_1_address) {
- store_buffer_hash_set_1_address_ = store_buffer_hash_set_1_address;
- }
-
- uintptr_t* store_buffer_hash_set_1_address() {
- return store_buffer_hash_set_1_address_;
- }
-
- void set_store_buffer_hash_set_2_address(
- uintptr_t* store_buffer_hash_set_2_address) {
- store_buffer_hash_set_2_address_ = store_buffer_hash_set_2_address;
- }
-
- uintptr_t* store_buffer_hash_set_2_address() {
- return store_buffer_hash_set_2_address_;
- }
void AddDetachedContext(Handle<Context> context);
void CheckDetachedContextsAfterGC();
@@ -1133,6 +1123,13 @@ class Isolate {
return array_buffer_allocator_;
}
+ FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }
+
+ void RegisterCancelableTask(Cancelable* task);
+ void RemoveCancelableTask(Cancelable* task);
+
+ interpreter::Interpreter* interpreter() const { return interpreter_; }
+
protected:
explicit Isolate(bool enable_serializer);
@@ -1283,19 +1280,16 @@ class Isolate {
RuntimeState runtime_state_;
Builtins builtins_;
bool has_installed_extensions_;
- StringTracker* string_tracker_;
unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
unibrow::Mapping<unibrow::Ecma262Canonicalize>
regexp_macro_assembler_canonicalize_;
RegExpStack* regexp_stack_;
DateCache* date_cache_;
+ ErrorToStringHelper error_tostring_helper_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CallInterfaceDescriptorData* call_descriptor_data_;
base::RandomNumberGenerator* random_number_generator_;
- // TODO(hpayer): Remove the following store buffer addresses.
- uintptr_t* store_buffer_hash_set_1_address_;
- uintptr_t* store_buffer_hash_set_2_address_;
// Whether the isolate has been created for snapshotting.
bool serializer_enabled_;
@@ -1320,6 +1314,8 @@ class Isolate {
HeapProfiler* heap_profiler_;
FunctionEntryHook function_entry_hook_;
+ interpreter::Interpreter* interpreter_;
+
typedef std::pair<InterruptCallback, void*> InterruptEntry;
std::queue<InterruptEntry> api_interrupts_queue_;
@@ -1366,8 +1362,9 @@ class Isolate {
v8::ArrayBuffer::Allocator* array_buffer_allocator_;
- v8::Isolate::AbortOnUncaughtExceptionCallback
- abort_on_uncaught_exception_callback_;
+ FutexWaitListNode futex_wait_list_node_;
+
+ std::set<Cancelable*> cancelable_tasks_;
friend class ExecutionAccess;
friend class HandleScopeImplementer;
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index f1fa5647f8..81c83bd1d8 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -5,13 +5,14 @@
#ifndef V8_JSON_PARSER_H_
#define V8_JSON_PARSER_H_
-#include "src/v8.h"
-
-#include "src/char-predicates-inl.h"
+#include "src/char-predicates.h"
#include "src/conversions.h"
-#include "src/heap/spaces-inl.h"
+#include "src/factory.h"
#include "src/messages.h"
+#include "src/scanner.h"
#include "src/token.h"
+#include "src/transitions.h"
+#include "src/types.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index 1ba99c1e9a..fb6b80dde4 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -5,9 +5,8 @@
#ifndef V8_JSON_STRINGIFIER_H_
#define V8_JSON_STRINGIFIER_H_
-#include "src/v8.h"
-
#include "src/conversions.h"
+#include "src/lookup.h"
#include "src/messages.h"
#include "src/string-builder.h"
#include "src/utils.h"
@@ -360,16 +359,11 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
Handle<Object> key,
bool deferred_comma,
bool deferred_key) {
- Handle<JSObject> builtins(isolate_->native_context()->builtins(), isolate_);
- Handle<JSFunction> builtin = Handle<JSFunction>::cast(
- Object::GetProperty(isolate_, builtins, "$jsonSerializeAdapter")
- .ToHandleChecked());
-
+ Handle<JSFunction> fun = isolate_->json_serialize_adapter();
Handle<Object> argv[] = { key, object };
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, result,
- Execution::Call(isolate_, builtin, object, 2, argv),
+ isolate_, result, Execution::Call(isolate_, fun, object, 2, argv),
EXCEPTION);
if (result->IsUndefined()) return UNCHANGED;
if (deferred_key) {
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index e405f87bab..36fda8e1e0 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $jsonSerializeAdapter;
-
(function(global, utils) {
"use strict";
@@ -15,15 +13,18 @@ var $jsonSerializeAdapter;
var GlobalJSON = global.JSON;
var InternalArray = utils.InternalArray;
-
var MathMax;
var MathMin;
var ObjectHasOwnProperty;
+var ToNumber;
+var ToString;
utils.Import(function(from) {
MathMax = from.MathMax;
MathMin = from.MathMin;
ObjectHasOwnProperty = from.ObjectHasOwnProperty;
+ ToNumber = from.ToNumber;
+ ToString = from.ToString;
});
// -------------------------------------------------------------------
@@ -164,10 +165,10 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
if (IS_ARRAY(value)) {
return SerializeArray(value, replacer, stack, indent, gap);
} else if (IS_NUMBER_WRAPPER(value)) {
- value = $toNumber(value);
+ value = ToNumber(value);
return JSON_NUMBER_TO_STRING(value);
} else if (IS_STRING_WRAPPER(value)) {
- return %QuoteJSONString($toString(value));
+ return %QuoteJSONString(ToString(value));
} else if (IS_BOOLEAN_WRAPPER(value)) {
return %_ValueOf(value) ? "true" : "false";
} else {
@@ -196,7 +197,7 @@ function JSONStringify(value, replacer, space) {
} else if (IS_NUMBER(v)) {
item = %_NumberToString(v);
} else if (IS_STRING_WRAPPER(v) || IS_NUMBER_WRAPPER(v)) {
- item = $toString(v);
+ item = ToString(v);
} else {
continue;
}
@@ -210,9 +211,9 @@ function JSONStringify(value, replacer, space) {
if (IS_OBJECT(space)) {
// Unwrap 'space' if it is wrapped
if (IS_NUMBER_WRAPPER(space)) {
- space = $toNumber(space);
+ space = ToNumber(space);
} else if (IS_STRING_WRAPPER(space)) {
- space = $toString(space);
+ space = ToString(space);
}
}
var gap;
@@ -244,11 +245,15 @@ utils.InstallFunctions(GlobalJSON, DONT_ENUM, [
// -------------------------------------------------------------------
// JSON Builtins
-$jsonSerializeAdapter = function(key, object) {
+function JsonSerializeAdapter(key, object) {
var holder = {};
holder[key] = object;
// No need to pass the actual holder since there is no replacer function.
return JSONSerialize(key, holder, UNDEFINED, new InternalArray(), "", "");
}
+utils.ExportToRuntime(function(to) {
+ to.JsonSerializeAdapter = JsonSerializeAdapter;
+});
+
})
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index 77671328b4..3771064c8f 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -56,6 +56,11 @@ bool LayoutDescriptor::GetIndexes(int field_index, int* layout_word_index,
}
+LayoutDescriptor* LayoutDescriptor::SetRawData(int field_index) {
+ return SetTagged(field_index, false);
+}
+
+
LayoutDescriptor* LayoutDescriptor::SetTagged(int field_index, bool tagged) {
int layout_word_index;
int layout_bit_index;
@@ -161,7 +166,7 @@ int LayoutDescriptor::GetSlowModeBackingStoreLength(int length) {
int LayoutDescriptor::CalculateCapacity(Map* map, DescriptorArray* descriptors,
int num_descriptors) {
- int inobject_properties = map->inobject_properties();
+ int inobject_properties = map->GetInObjectProperties();
if (inobject_properties == 0) return 0;
DCHECK_LE(num_descriptors, descriptors->number_of_descriptors());
@@ -195,7 +200,7 @@ LayoutDescriptor* LayoutDescriptor::Initialize(
LayoutDescriptor* layout_descriptor, Map* map, DescriptorArray* descriptors,
int num_descriptors) {
DisallowHeapAllocation no_allocation;
- int inobject_properties = map->inobject_properties();
+ int inobject_properties = map->GetInObjectProperties();
for (int i = 0; i < num_descriptors; i++) {
PropertyDetails details = descriptors->GetDetails(i);
@@ -214,7 +219,7 @@ LayoutDescriptor* LayoutDescriptor::Initialize(
}
-// InobjectPropertiesHelper is a helper class for querying whether inobject
+// LayoutDescriptorHelper is a helper class for querying whether inobject
// property at offset is Double or not.
LayoutDescriptorHelper::LayoutDescriptorHelper(Map* map)
: all_fields_tagged_(true),
@@ -227,7 +232,7 @@ LayoutDescriptorHelper::LayoutDescriptorHelper(Map* map)
return;
}
- int inobject_properties = map->inobject_properties();
+ int inobject_properties = map->GetInObjectProperties();
DCHECK(inobject_properties > 0);
header_size_ = map->instance_size() - (inobject_properties * kPointerSize);
DCHECK(header_size_ >= 0);
diff --git a/deps/v8/src/layout-descriptor.cc b/deps/v8/src/layout-descriptor.cc
index 25cece822a..b961a7de96 100644
--- a/deps/v8/src/layout-descriptor.cc
+++ b/deps/v8/src/layout-descriptor.cc
@@ -46,7 +46,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::ShareAppend(
Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
isolate);
- if (!InobjectUnboxedField(map->inobject_properties(), details)) {
+ if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
DCHECK(details.location() != kField ||
layout_descriptor->IsTagged(details.field_index()));
return layout_descriptor;
@@ -73,7 +73,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
if (layout_descriptor->IsSlowLayout()) {
return full_layout_descriptor;
}
- if (!InobjectUnboxedField(map->inobject_properties(), details)) {
+ if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
DCHECK(details.location() != kField ||
layout_descriptor->IsTagged(details.field_index()));
return handle(layout_descriptor, map->GetIsolate());
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
index 0a14f53198..11d8d35f26 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/layout-descriptor.h
@@ -124,9 +124,7 @@ class LayoutDescriptor : public FixedTypedArray<Uint32ArrayTraits> {
V8_INLINE bool GetIndexes(int field_index, int* layout_word_index,
int* layout_bit_index);
- V8_INLINE MUST_USE_RESULT LayoutDescriptor* SetRawData(int field_index) {
- return SetTagged(field_index, false);
- }
+ V8_INLINE MUST_USE_RESULT LayoutDescriptor* SetRawData(int field_index);
V8_INLINE MUST_USE_RESULT LayoutDescriptor* SetTagged(int field_index,
bool tagged);
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index b41c5852a8..ddceab5457 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -155,6 +155,15 @@ void DefaultPlatform::CallDelayedOnForegroundThread(Isolate* isolate,
}
+void DefaultPlatform::CallIdleOnForegroundThread(Isolate* isolate,
+ IdleTask* task) {
+ UNREACHABLE();
+}
+
+
+bool DefaultPlatform::IdleTasksEnabled(Isolate* isolate) { return false; }
+
+
double DefaultPlatform::MonotonicallyIncreasingTime() {
return base::TimeTicks::HighResolutionNow().ToInternalValue() /
static_cast<double>(base::Time::kMicrosecondsPerSecond);
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index fba5803f40..94ef9c5055 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -40,6 +40,9 @@ class DefaultPlatform : public Platform {
Task* task) override;
virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
double delay_in_seconds) override;
+ virtual void CallIdleOnForegroundThread(Isolate* isolate,
+ IdleTask* task) override;
+ virtual bool IdleTasksEnabled(Isolate* isolate) override;
double MonotonicallyIncreasingTime() override;
private:
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index 98f0343fa5..94ef14dbae 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -125,6 +125,12 @@ bool List<T, P>::RemoveElement(const T& elm) {
return false;
}
+template <typename T, class P>
+void List<T, P>::Swap(List<T, P>* list) {
+ std::swap(data_, list->data_);
+ std::swap(length_, list->length_);
+ std::swap(capacity_, list->capacity_);
+}
template<typename T, class P>
void List<T, P>::Allocate(int length, P allocator) {
@@ -239,15 +245,6 @@ void List<T, P>::StableSort() {
}
-template <typename T, class P>
-void List<T, P>::Initialize(int capacity, P allocator) {
- DCHECK(capacity >= 0);
- data_ = (capacity > 0) ? NewData(capacity, allocator) : NULL;
- capacity_ = capacity;
- length_ = 0;
-}
-
-
template <typename T, typename P>
int SortedListBSearch(const List<T>& list, P cmp) {
int low = 0;
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index b636449c42..d935f764b8 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -5,6 +5,8 @@
#ifndef V8_LIST_H_
#define V8_LIST_H_
+#include <algorithm>
+
#include "src/checks.h"
#include "src/utils.h"
@@ -137,6 +139,9 @@ class List {
// Drop the last 'count' elements from the list.
INLINE(void RewindBy(int count)) { Rewind(length_ - count); }
+ // Swaps the contents of the two lists.
+ INLINE(void Swap(List<T, AllocationPolicy>* list));
+
// Halve the capacity if fill level is less than a quarter.
INLINE(void Trim(AllocationPolicy allocator = AllocationPolicy()));
@@ -161,7 +166,12 @@ class List {
void StableSort();
INLINE(void Initialize(int capacity,
- AllocationPolicy allocator = AllocationPolicy()));
+ AllocationPolicy allocator = AllocationPolicy())) {
+ DCHECK(capacity >= 0);
+ data_ = (capacity > 0) ? NewData(capacity, allocator) : NULL;
+ capacity_ = capacity;
+ length_ = 0;
+ }
private:
T* data_;
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 79d6cfe5f0..afa28bbb40 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -1343,7 +1343,7 @@ void LAllocator::BuildLiveRanges() {
DCHECK(chunk_->info()->IsOptimizing());
AllowHandleDereference allow_deref;
PrintF("Function: %s\n",
- chunk_->info()->function()->debug_name()->ToCString().get());
+ chunk_->info()->literal()->debug_name()->ToCString().get());
}
PrintF("Value %d used before first definition!\n", operand_index);
LiveRange* range = LiveRangeFor(operand_index);
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index 2a8080011f..7c94772450 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -5,8 +5,6 @@
#ifndef V8_LITHIUM_ALLOCATOR_H_
#define V8_LITHIUM_ALLOCATOR_H_
-#include "src/v8.h"
-
#include "src/allocation.h"
#include "src/lithium.h"
#include "src/zone.h"
diff --git a/deps/v8/src/lithium-codegen.h b/deps/v8/src/lithium-codegen.h
index fddc1b2599..ce04da9006 100644
--- a/deps/v8/src/lithium-codegen.h
+++ b/deps/v8/src/lithium-codegen.h
@@ -5,8 +5,6 @@
#ifndef V8_LITHIUM_CODEGEN_H_
#define V8_LITHIUM_CODEGEN_H_
-#include "src/v8.h"
-
#include "src/bailout-reason.h"
#include "src/compiler.h"
#include "src/deoptimizer.h"
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 835ed8e21e..ff9af685d1 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -2,10 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/log-utils.h"
+
+#include "src/assert-scope.h"
+#include "src/base/platform/platform.h"
+#include "src/objects-inl.h"
#include "src/string-stream.h"
+#include "src/utils.h"
#include "src/version.h"
namespace v8 {
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index afc3521c60..87dab52406 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -6,6 +6,8 @@
#define V8_LOG_UTILS_H_
#include "src/allocation.h"
+#include "src/base/platform/mutex.h"
+#include "src/flags.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 8f47e81f0e..044250f119 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -288,6 +288,12 @@ void PerfBasicLogger::LogRecordedBuffer(Code* code,
int length) {
DCHECK(code->instruction_start() == code->address() + Code::kHeaderSize);
+ if (FLAG_perf_basic_prof_only_functions &&
+ (code->kind() != Code::FUNCTION &&
+ code->kind() != Code::OPTIMIZED_FUNCTION)) {
+ return;
+ }
+
base::OS::FPrint(perf_output_handle_, "%llx %x %.*s\n",
reinterpret_cast<uint64_t>(code->instruction_start()),
code->instruction_size(), length, name);
@@ -1016,10 +1022,10 @@ void Logger::ApiNamedPropertyAccess(const char* tag,
DCHECK(name->IsName());
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
- SmartArrayPointer<char> class_name =
+ base::SmartArrayPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
if (name->IsString()) {
- SmartArrayPointer<char> property_name =
+ base::SmartArrayPointer<char> property_name =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\",\"%s\"", tag, class_name.get(),
property_name.get());
@@ -1029,8 +1035,9 @@ void Logger::ApiNamedPropertyAccess(const char* tag,
if (symbol->name()->IsUndefined()) {
ApiEvent("api,%s,\"%s\",symbol(hash %x)", tag, class_name.get(), hash);
} else {
- SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
- DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ base::SmartArrayPointer<char> str =
+ String::cast(symbol->name())
+ ->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\",symbol(\"%s\" hash %x)", tag, class_name.get(),
str.get(), hash);
}
@@ -1042,7 +1049,7 @@ void Logger::ApiIndexedPropertyAccess(const char* tag,
uint32_t index) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
- SmartArrayPointer<char> class_name =
+ base::SmartArrayPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\",%u", tag, class_name.get(), index);
}
@@ -1051,7 +1058,7 @@ void Logger::ApiIndexedPropertyAccess(const char* tag,
void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = object->class_name();
- SmartArrayPointer<char> class_name =
+ base::SmartArrayPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\"", tag, class_name.get());
}
@@ -1089,7 +1096,7 @@ void Logger::CallbackEventInternal(const char* prefix, Name* name,
kLogEventsNames[CALLBACK_TAG]);
msg.AppendAddress(entry_point);
if (name->IsString()) {
- SmartArrayPointer<char> str =
+ base::SmartArrayPointer<char> str =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append(",1,\"%s%s\"", prefix, str.get());
} else {
@@ -1097,8 +1104,9 @@ void Logger::CallbackEventInternal(const char* prefix, Name* name,
if (symbol->name()->IsUndefined()) {
msg.Append(",1,symbol(hash %x)", prefix, symbol->Hash());
} else {
- SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
- DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ base::SmartArrayPointer<char> str =
+ String::cast(symbol->name())
+ ->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append(",1,symbol(\"%s\" hash %x)", prefix, str.get(),
symbol->Hash());
}
@@ -1192,7 +1200,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
if (name->IsString()) {
- SmartArrayPointer<char> str =
+ base::SmartArrayPointer<char> str =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("\"%s\"", str.get());
} else {
@@ -1222,12 +1230,12 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
- SmartArrayPointer<char> name =
+ base::SmartArrayPointer<char> name =
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("\"%s ", name.get());
if (source->IsString()) {
- SmartArrayPointer<char> sourcestr =
- String::cast(source)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ base::SmartArrayPointer<char> sourcestr = String::cast(source)->ToCString(
+ DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s", sourcestr.get());
} else {
msg.AppendSymbolName(Symbol::cast(source));
@@ -1265,7 +1273,7 @@ void Logger::CodeDisableOptEvent(Code* code,
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[CODE_DISABLE_OPT_EVENT]);
- SmartArrayPointer<char> name =
+ base::SmartArrayPointer<char> name =
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("\"%s\",", name.get());
msg.Append("\"%s\"", GetBailoutReason(shared->disable_optimization_reason()));
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 81793d344e..a98e5d9a11 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -59,9 +59,22 @@ void LookupIterator::RestartLookupForNonMaskingInterceptors() {
}
-Handle<JSReceiver> LookupIterator::GetRoot(Handle<Object> receiver,
- Isolate* isolate) {
+// static
+Handle<JSReceiver> LookupIterator::GetRoot(Isolate* isolate,
+ Handle<Object> receiver,
+ uint32_t index) {
if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
+ // Strings are the only objects with properties (only elements) directly on
+ // the wrapper. Hence we can skip generating the wrapper for all other cases.
+ if (index != kMaxUInt32 && receiver->IsString() &&
+ index < static_cast<uint32_t>(String::cast(*receiver)->length())) {
+ // TODO(verwaest): Speed this up. Perhaps use a cached wrapper on the native
+ // context, ensuring that we don't leak it into JS?
+ Handle<JSFunction> constructor = isolate->string_function();
+ Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
+ Handle<JSValue>::cast(result)->set_value(*receiver);
+ return result;
+ }
auto root = handle(receiver->GetRootMap(isolate)->prototype(), isolate);
if (root->IsNull()) {
unsigned int magic = 0xbbbbbbbb;
@@ -72,7 +85,7 @@ Handle<JSReceiver> LookupIterator::GetRoot(Handle<Object> receiver,
Handle<Map> LookupIterator::GetReceiverMap() const {
- if (receiver_->IsNumber()) return isolate_->factory()->heap_number_map();
+ if (receiver_->IsNumber()) return factory()->heap_number_map();
return handle(Handle<HeapObject>::cast(receiver_)->map(), isolate_);
}
@@ -104,8 +117,7 @@ void LookupIterator::ReloadPropertyInformation() {
void LookupIterator::ReloadHolderMap() {
DCHECK_EQ(DATA, state_);
DCHECK(IsElement());
- DCHECK(JSObject::cast(*holder_)->HasExternalArrayElements() ||
- JSObject::cast(*holder_)->HasFixedTypedArrayElements());
+ DCHECK(JSObject::cast(*holder_)->HasFixedTypedArrayElements());
if (*holder_map_ != holder_->map()) {
holder_map_ = handle(holder_->map(), isolate_);
}
@@ -119,29 +131,15 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
Handle<JSObject> holder = GetHolder<JSObject>();
if (IsElement()) {
- ElementsKind old_kind = holder_map_->elements_kind();
- holder_map_ = Map::PrepareForDataElement(holder_map_, value);
- ElementsKind new_kind = holder_map_->elements_kind();
- if (new_kind != old_kind) {
- // TODO(verwaest): Handle element migration in MigrateToMap.
- JSObject::UpdateAllocationSite(holder, new_kind);
- if (IsFastDoubleElementsKind(old_kind) !=
- IsFastDoubleElementsKind(new_kind)) {
- uint32_t capacity = holder->elements()->length();
- ElementsAccessor* accessor = ElementsAccessor::ForKind(new_kind);
- accessor->GrowCapacityAndConvert(holder, capacity);
- // GrowCapacityAndConvert migrated the object. No reloading of property
- // infomation is necessary for elements.
- return;
- } else if (FLAG_trace_elements_transitions) {
- Handle<FixedArrayBase> elements(holder->elements());
- JSObject::PrintElementsTransition(stdout, holder, old_kind, elements,
- new_kind, elements);
- }
- }
+ ElementsKind kind = holder_map_->elements_kind();
+ ElementsKind to = value->OptimalElementsKind();
+ if (IsHoleyElementsKind(kind)) to = GetHoleyElementsKind(to);
+ to = IsMoreGeneralElementsKindTransition(kind, to) ? to : kind;
+ JSObject::TransitionElementsKind(holder, to);
+ holder_map_ = handle(holder->map(), isolate_);
// Copy the backing store if it is copy-on-write.
- if (IsFastSmiOrObjectElementsKind(new_kind)) {
+ if (IsFastSmiOrObjectElementsKind(to)) {
JSObject::EnsureWritableFastElements(holder);
}
@@ -162,7 +160,6 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
DCHECK(HolderIsReceiverOrHiddenPrototype());
Handle<JSObject> holder = GetHolder<JSObject>();
if (IsElement()) {
- DCHECK(!holder->HasExternalArrayElements());
DCHECK(!holder->HasFixedTypedArrayElements());
DCHECK(attributes != NONE || !holder->HasFastElements());
Handle<FixedArrayBase> elements(holder->elements());
@@ -181,6 +178,13 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
}
ReloadPropertyInformation();
+ WriteDataValue(value);
+
+#if VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ holder->JSObjectVerify();
+ }
+#endif
}
@@ -264,20 +268,19 @@ void LookupIterator::TransitionToAccessorProperty(
// handled via a trap. Adding properties to primitive values is not
// observable.
Handle<JSObject> receiver = GetStoreTarget();
- holder_ = receiver;
- holder_map_ =
- Map::TransitionToAccessorProperty(handle(receiver->map(), isolate_),
- name_, component, accessor, attributes);
- JSObject::MigrateToMap(receiver, holder_map_);
- ReloadPropertyInformation();
+ if (!IsElement() && !receiver->map()->is_dictionary_map()) {
+ holder_ = receiver;
+ holder_map_ = Map::TransitionToAccessorProperty(
+ handle(receiver->map(), isolate_), name_, component, accessor,
+ attributes);
+ JSObject::MigrateToMap(receiver, holder_map_);
- if (!holder_map_->is_dictionary_map()) return;
+ ReloadPropertyInformation();
+ if (!holder_map_->is_dictionary_map()) return;
+ }
- // Install the accessor into the dictionary-mode object.
- PropertyDetails details(attributes, ACCESSOR_CONSTANT, 0,
- PropertyCellType::kMutable);
Handle<AccessorPair> pair;
if (state() == ACCESSOR && GetAccessors()->IsAccessorPair()) {
pair = Handle<AccessorPair>::cast(GetAccessors());
@@ -289,12 +292,62 @@ void LookupIterator::TransitionToAccessorProperty(
pair->set(component, *accessor);
}
} else {
- pair = isolate()->factory()->NewAccessorPair();
+ pair = factory()->NewAccessorPair();
pair->set(component, *accessor);
}
- JSObject::SetNormalizedProperty(receiver, name_, pair, details);
- JSObject::ReoptimizeIfPrototype(receiver);
+ TransitionToAccessorPair(pair, attributes);
+
+#if VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ receiver->JSObjectVerify();
+ }
+#endif
+}
+
+
+void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
+ PropertyAttributes attributes) {
+ Handle<JSObject> receiver = GetStoreTarget();
+ holder_ = receiver;
+
+ PropertyDetails details(attributes, ACCESSOR_CONSTANT, 0,
+ PropertyCellType::kMutable);
+
+ if (IsElement()) {
+ // TODO(verwaest): Move code into the element accessor.
+ Handle<SeededNumberDictionary> dictionary =
+ JSObject::NormalizeElements(receiver);
+
+ // We unconditionally pass used_as_prototype=false here because the call
+ // to RequireSlowElements takes care of the required IC clearing and
+ // we don't want to walk the heap twice.
+ dictionary =
+ SeededNumberDictionary::Set(dictionary, index_, pair, details, false);
+ receiver->RequireSlowElements(*dictionary);
+
+ if (receiver->HasSlowArgumentsElements()) {
+ FixedArray* parameter_map = FixedArray::cast(receiver->elements());
+ uint32_t length = parameter_map->length() - 2;
+ if (number_ < length) {
+ parameter_map->set(number_ + 2, heap()->the_hole_value());
+ }
+ FixedArray::cast(receiver->elements())->set(1, *dictionary);
+ } else {
+ receiver->set_elements(*dictionary);
+ }
+ } else {
+ PropertyNormalizationMode mode = receiver->map()->is_prototype_map()
+ ? KEEP_INOBJECT_PROPERTIES
+ : CLEAR_INOBJECT_PROPERTIES;
+ // Normalize object to make this operation simple.
+ JSObject::NormalizeProperties(receiver, mode, 0,
+ "TransitionToAccessorPair");
+
+ JSObject::SetNormalizedProperty(receiver, name_, pair, details);
+ JSObject::ReoptimizeIfPrototype(receiver);
+ }
+
holder_map_ = handle(receiver->map(), isolate_);
ReloadPropertyInformation();
}
@@ -340,7 +393,7 @@ Handle<Object> LookupIterator::FetchValue() const {
}
ElementsAccessor* accessor = holder->GetElementsAccessor();
- return accessor->Get(holder, index_);
+ return accessor->Get(handle(holder->elements()), number_);
} else if (holder_map_->IsGlobalObjectMap()) {
result = holder->global_dictionary()->ValueAt(number_);
DCHECK(result->IsPropertyCell());
@@ -425,7 +478,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value) {
Handle<JSObject> holder = GetHolder<JSObject>();
if (IsElement()) {
ElementsAccessor* accessor = holder->GetElementsAccessor();
- accessor->Set(holder->elements(), index_, *value);
+ accessor->Set(holder->elements(), number_, *value);
} else if (holder->IsGlobalObject()) {
Handle<GlobalDictionary> property_dictionary =
handle(holder->global_dictionary());
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 90edd8b43d..3888ed6240 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -56,7 +56,7 @@ class LookupIterator final BASE_EMBEDDED {
// kMaxUInt32 isn't a valid index.
index_(kMaxUInt32),
receiver_(receiver),
- holder_(GetRoot(receiver_, isolate_)),
+ holder_(GetRoot(isolate_, receiver)),
holder_map_(holder_->map(), isolate_),
initial_holder_(holder_),
number_(DescriptorArray::kNotFound) {
@@ -102,7 +102,7 @@ class LookupIterator final BASE_EMBEDDED {
name_(),
index_(index),
receiver_(receiver),
- holder_(GetRoot(receiver_, isolate_)),
+ holder_(GetRoot(isolate, receiver, index)),
holder_map_(holder_->map(), isolate_),
initial_holder_(holder_),
number_(DescriptorArray::kNotFound) {
@@ -168,7 +168,7 @@ class LookupIterator final BASE_EMBEDDED {
Handle<Name> GetName() {
if (name_.is_null()) {
DCHECK(IsElement());
- name_ = isolate_->factory()->Uint32ToString(index_);
+ name_ = factory()->Uint32ToString(index_);
}
return name_;
}
@@ -183,6 +183,7 @@ class LookupIterator final BASE_EMBEDDED {
state_ = NOT_FOUND;
}
+ Heap* heap() const { return isolate_->heap(); }
Factory* factory() const { return isolate_->factory(); }
Handle<Object> GetReceiver() const { return receiver_; }
Handle<JSObject> GetStoreTarget() const;
@@ -196,7 +197,8 @@ class LookupIterator final BASE_EMBEDDED {
DCHECK(IsFound());
return Handle<T>::cast(holder_);
}
- static Handle<JSReceiver> GetRoot(Handle<Object> receiver, Isolate* isolate);
+ static Handle<JSReceiver> GetRoot(Isolate* isolate, Handle<Object> receiver,
+ uint32_t index = kMaxUInt32);
bool HolderIsReceiverOrHiddenPrototype() const;
/* ACCESS_CHECK */
@@ -220,6 +222,8 @@ class LookupIterator final BASE_EMBEDDED {
void TransitionToAccessorProperty(AccessorComponent component,
Handle<Object> accessor,
PropertyAttributes attributes);
+ void TransitionToAccessorPair(Handle<Object> pair,
+ PropertyAttributes attributes);
PropertyDetails property_details() const {
DCHECK(has_property_);
return property_details_;
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 5e28c66a83..49b2cad8bd 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -98,6 +98,7 @@ macro IS_ARRAY(arg) = (%_IsArray(arg));
macro IS_DATE(arg) = (%_IsDate(arg));
macro IS_FUNCTION(arg) = (%_IsFunction(arg));
macro IS_REGEXP(arg) = (%_IsRegExp(arg));
+macro IS_SIMD_VALUE(arg) = (%_IsSimdValue(arg));
macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
@@ -116,7 +117,6 @@ macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
-macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
macro IS_STRONG(arg) = (%IsStrong(arg));
# Macro for ECMAScript 5 queries of the type:
@@ -135,7 +135,7 @@ macro IS_SPEC_FUNCTION(arg) = (%_ClassOf(arg) === 'Function');
# Macro for ES6 CheckObjectCoercible
# Will throw a TypeError of the form "[functionName] called on null or undefined".
-macro CHECK_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL_OR_UNDEFINED(arg) && !IS_UNDETECTABLE(arg)) throw MakeTypeError(kCalledOnNullOrUndefined, functionName);
+macro CHECK_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) || IS_UNDEFINED(arg)) throw MakeTypeError(kCalledOnNullOrUndefined, functionName);
# Indices in bound function info retrieved by %BoundFunctionGetBindings(...).
define kBoundFunctionIndex = 0;
@@ -145,14 +145,14 @@ define kBoundArgumentsStartIndex = 2;
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
-macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger($toNumber(arg)));
-macro TO_INTEGER_FOR_SIDE_EFFECT(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : $toNumber(arg));
-macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero($toNumber(arg)));
-macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
+macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
+macro TO_INTEGER_FOR_SIDE_EFFECT(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToNumber(arg));
+macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
+macro TO_INT32(arg) = (arg | 0);
macro TO_UINT32(arg) = (arg >>> 0);
macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : $nonStringToString(arg));
macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : $nonNumberToNumber(arg));
-macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : $toObject(arg));
+macro TO_OBJECT(arg) = (%_ToObject(arg));
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
macro HAS_OWN_PROPERTY(arg, index) = (%_CallFunction(arg, index, ObjectHasOwnProperty));
macro SHOULD_CREATE_WRAPPER(functionName, receiver) = (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(functionName));
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index fc3bf2fcda..51f4d445c7 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -12,6 +12,7 @@ var rngstate; // Initialized to a Uint32Array during genesis.
// -------------------------------------------------------------------
// Imports
+var GlobalMath = global.Math;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
@@ -287,20 +288,10 @@ function CubeRoot(x) {
// -------------------------------------------------------------------
-// Instance class name can only be set on functions. That is the only
-// purpose for MathConstructor.
-function MathConstructor() {}
-
-var Math = new MathConstructor();
-
-%InternalSetPrototype(Math, GlobalObject.prototype);
-%AddNamedProperty(global, "Math", Math, DONT_ENUM);
-%FunctionSetInstanceClassName(MathConstructor, 'Math');
-
-%AddNamedProperty(Math, symbolToStringTag, "Math", READ_ONLY | DONT_ENUM);
+%AddNamedProperty(GlobalMath, symbolToStringTag, "Math", READ_ONLY | DONT_ENUM);
// Set up math constants.
-utils.InstallConstants(Math, [
+utils.InstallConstants(GlobalMath, [
// ECMA-262, section 15.8.1.1.
"E", 2.7182818284590452354,
// ECMA-262, section 15.8.1.2.
@@ -317,7 +308,7 @@ utils.InstallConstants(Math, [
// Set up non-enumerable functions of the Math object and
// set their names.
-utils.InstallFunctions(Math, DONT_ENUM, [
+utils.InstallFunctions(GlobalMath, DONT_ENUM, [
"random", MathRandom,
"abs", MathAbs,
"acos", MathAcosJS,
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 7193392d9d..3de6717cbc 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -6,7 +6,6 @@
#include "src/api.h"
#include "src/execution.h"
-#include "src/heap/spaces-inl.h"
#include "src/messages.h"
#include "src/string-builder.h"
@@ -19,13 +18,13 @@ namespace internal {
void MessageHandler::DefaultMessageReport(Isolate* isolate,
const MessageLocation* loc,
Handle<Object> message_obj) {
- SmartArrayPointer<char> str = GetLocalizedMessage(isolate, message_obj);
+ base::SmartArrayPointer<char> str = GetLocalizedMessage(isolate, message_obj);
if (loc == NULL) {
PrintF("%s\n", str.get());
} else {
HandleScope scope(isolate);
Handle<Object> data(loc->script()->name(), isolate);
- SmartArrayPointer<char> data_str;
+ base::SmartArrayPointer<char> data_str;
if (data->IsString())
data_str = Handle<String>::cast(data)->ToCString(DISALLOW_NULLS);
PrintF("%s:%i: %s\n", data_str.get() ? data_str.get() : "<unknown>",
@@ -82,7 +81,7 @@ void MessageHandler::ReportMessage(Isolate* isolate, MessageLocation* loc,
Handle<Object> argument(message->argument(), isolate);
Handle<Object> args[] = {argument};
MaybeHandle<Object> maybe_stringified = Execution::TryCall(
- isolate->to_detail_string_fun(), isolate->js_builtins_object(),
+ isolate->to_detail_string_fun(), isolate->factory()->undefined_value(),
arraysize(args), args);
Handle<Object> stringified;
if (!maybe_stringified.ToHandle(&stringified)) {
@@ -133,9 +132,8 @@ Handle<String> MessageHandler::GetMessage(Isolate* isolate,
}
-SmartArrayPointer<char> MessageHandler::GetLocalizedMessage(
- Isolate* isolate,
- Handle<Object> data) {
+base::SmartArrayPointer<char> MessageHandler::GetLocalizedMessage(
+ Isolate* isolate, Handle<Object> data) {
HandleScope scope(isolate);
return GetMessage(isolate, data)->ToCString(DISALLOW_NULLS);
}
@@ -299,14 +297,10 @@ Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
if (arg->IsString()) {
result_string = Handle<String>::cast(arg);
} else {
- Handle<String> fmt_str = factory->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("$noSideEffectToString"));
- Handle<JSFunction> fun = Handle<JSFunction>::cast(
- Object::GetProperty(isolate->js_builtins_object(), fmt_str)
- .ToHandleChecked());
+ Handle<JSFunction> fun = isolate->no_side_effect_to_string_fun();
MaybeHandle<Object> maybe_result =
- Execution::TryCall(fun, isolate->js_builtins_object(), 1, &arg);
+ Execution::TryCall(fun, factory->undefined_value(), 1, &arg);
Handle<Object> result;
if (!maybe_result.ToHandle(&result) || !result->IsString()) {
return factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("<error>"));
@@ -328,23 +322,29 @@ Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
}
-MaybeHandle<String> MessageTemplate::FormatMessage(int template_index,
- Handle<String> arg0,
- Handle<String> arg1,
- Handle<String> arg2) {
- Isolate* isolate = arg0->GetIsolate();
- const char* template_string;
+const char* MessageTemplate::TemplateString(int template_index) {
switch (template_index) {
-#define CASE(NAME, STRING) \
- case k##NAME: \
- template_string = STRING; \
- break;
+#define CASE(NAME, STRING) \
+ case k##NAME: \
+ return STRING;
MESSAGE_TEMPLATES(CASE)
#undef CASE
case kLastMessage:
default:
- isolate->ThrowIllegalOperation();
- return MaybeHandle<String>();
+ return NULL;
+ }
+}
+
+
+MaybeHandle<String> MessageTemplate::FormatMessage(int template_index,
+ Handle<String> arg0,
+ Handle<String> arg1,
+ Handle<String> arg2) {
+ Isolate* isolate = arg0->GetIsolate();
+ const char* template_string = TemplateString(template_index);
+ if (template_string == NULL) {
+ isolate->ThrowIllegalOperation();
+ return MaybeHandle<String>();
}
IncrementalStringBuilder builder(isolate);
@@ -369,5 +369,98 @@ MaybeHandle<String> MessageTemplate::FormatMessage(int template_index,
return builder.Finish();
}
+
+
+MaybeHandle<String> ErrorToStringHelper::Stringify(Isolate* isolate,
+ Handle<JSObject> error) {
+ VisitedScope scope(this, error);
+ if (scope.has_visited()) return isolate->factory()->empty_string();
+
+ Handle<String> name;
+ Handle<String> message;
+ Handle<Name> internal_key = isolate->factory()->internal_error_symbol();
+ Handle<String> message_string =
+ isolate->factory()->NewStringFromStaticChars("message");
+ Handle<String> name_string = isolate->factory()->name_string();
+ LookupIterator internal_error_lookup(
+ error, internal_key, LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ LookupIterator message_lookup(
+ error, message_string, LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ LookupIterator name_lookup(error, name_string,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+
+ // Find out whether an internally created error object is on the prototype
+ // chain. If the name property is found on a holder prior to the internally
+ // created error object, use that name property. Otherwise just use the
+ // constructor name to avoid triggering possible side effects.
+ // Similar for the message property. If the message property shadows the
+ // internally created error object, use that message property. Otherwise
+ // use empty string as message.
+ if (internal_error_lookup.IsFound()) {
+ if (!ShadowsInternalError(isolate, &name_lookup, &internal_error_lookup)) {
+ Handle<JSObject> holder = internal_error_lookup.GetHolder<JSObject>();
+ name = Handle<String>(holder->constructor_name());
+ }
+ if (!ShadowsInternalError(isolate, &message_lookup,
+ &internal_error_lookup)) {
+ message = isolate->factory()->empty_string();
+ }
+ }
+ if (name.is_null()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, name,
+ GetStringifiedProperty(isolate, &name_lookup,
+ isolate->factory()->Error_string()),
+ String);
+ }
+ if (message.is_null()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, message,
+ GetStringifiedProperty(isolate, &message_lookup,
+ isolate->factory()->empty_string()),
+ String);
+ }
+
+ if (name->length() == 0) return message;
+ if (message->length() == 0) return name;
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendString(name);
+ builder.AppendCString(": ");
+ builder.AppendString(message);
+ return builder.Finish();
+}
+
+
+bool ErrorToStringHelper::ShadowsInternalError(
+ Isolate* isolate, LookupIterator* property_lookup,
+ LookupIterator* internal_error_lookup) {
+ if (!property_lookup->IsFound()) return false;
+ Handle<JSObject> holder = property_lookup->GetHolder<JSObject>();
+ // It's fine if the property is defined on the error itself.
+ if (holder.is_identical_to(property_lookup->GetReceiver())) return true;
+ PrototypeIterator it(isolate, holder, PrototypeIterator::START_AT_RECEIVER);
+ while (true) {
+ if (it.IsAtEnd()) return false;
+ if (it.IsAtEnd(internal_error_lookup->GetHolder<JSObject>())) return true;
+ it.AdvanceIgnoringProxies();
+ }
+}
+
+
+MaybeHandle<String> ErrorToStringHelper::GetStringifiedProperty(
+ Isolate* isolate, LookupIterator* property_lookup,
+ Handle<String> default_value) {
+ if (!property_lookup->IsFound()) return default_value;
+ Handle<Object> obj;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, Object::GetProperty(property_lookup),
+ String);
+ if (obj->IsUndefined()) return default_value;
+ if (!obj->IsString()) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, Execution::ToString(isolate, obj),
+ String);
+ }
+ return Handle<String>::cast(obj);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 5c3e867933..779e60e57f 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -10,33 +10,15 @@
#ifndef V8_MESSAGES_H_
#define V8_MESSAGES_H_
-// Forward declaration of MessageLocation.
-namespace v8 {
-namespace internal {
-class MessageLocation;
-} } // namespace v8::internal
-
-
-class V8Message {
- public:
- V8Message(char* type,
- v8::internal::Handle<v8::internal::JSArray> args,
- const v8::internal::MessageLocation* loc) :
- type_(type), args_(args), loc_(loc) { }
- char* type() const { return type_; }
- v8::internal::Handle<v8::internal::JSArray> args() const { return args_; }
- const v8::internal::MessageLocation* loc() const { return loc_; }
- private:
- char* type_;
- v8::internal::Handle<v8::internal::JSArray> const args_;
- const v8::internal::MessageLocation* loc_;
-};
-
+#include "src/base/smart-pointers.h"
+#include "src/list.h"
namespace v8 {
namespace internal {
-struct Language;
+// Forward declarations.
+class JSMessageObject;
+class LookupIterator;
class SourceInfo;
class MessageLocation {
@@ -91,6 +73,7 @@ class CallSite {
/* Error */ \
T(None, "") \
T(CyclicProto, "Cyclic __proto__ value") \
+ T(Debugger, "Debugger: %") \
T(DebuggerLoading, "Error loading debugger") \
T(DefaultOptionsMissing, "Internal % error. Default options are missing.") \
T(UncaughtException, "Uncaught %") \
@@ -119,6 +102,8 @@ class CallSite {
T(DataViewNotArrayBuffer, \
"First argument to DataView constructor must be an ArrayBuffer") \
T(DateType, "this is not a Date object.") \
+ T(DebuggerFrame, "Debugger: Invalid frame index.") \
+ T(DebuggerType, "Debugger: Parameters have wrong types.") \
T(DefineDisallowed, "Cannot define property:%, object is not extensible.") \
T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \
T(ExtendsValueGenerator, \
@@ -163,6 +148,7 @@ class CallSite {
T(NotTypedArray, "this is not a typed array.") \
T(NotSharedTypedArray, "% is not a shared typed array.") \
T(NotIntegerSharedTypedArray, "% is not an integer shared typed array.") \
+ T(NotInt32SharedTypedArray, "% is not an int32 shared typed array.") \
T(ObjectGetterExpectingFunction, \
"Object.prototype.__defineGetter__: Expecting function") \
T(ObjectGetterCallable, "Getter must be a function: %") \
@@ -231,6 +217,7 @@ class CallSite {
"In strong mode, calling a function with too few arguments is deprecated") \
T(StrongDeleteProperty, \
"Deleting property '%' of strong object '%' is deprecated") \
+ T(StrongExtendNull, "In strong mode, classes extending null are deprecated") \
T(StrongImplicitConversion, \
"In strong mode, implicit conversions are deprecated") \
T(StrongRedefineDisallowed, \
@@ -239,10 +226,9 @@ class CallSite {
T(StrongSetProto, \
"On strong object %, redefining the internal prototype is deprecated") \
T(SymbolKeyFor, "% is not a symbol") \
- T(SymbolToPrimitive, \
- "Cannot convert a Symbol wrapper object to a primitive value") \
T(SymbolToNumber, "Cannot convert a Symbol value to a number") \
T(SymbolToString, "Cannot convert a Symbol value to a string") \
+ T(SimdToNumber, "Cannot convert a SIMD value to a number") \
T(UndefinedOrNullToObject, "Cannot convert undefined or null to object") \
T(ValueAndAccessor, \
"Invalid property. A property cannot both have accessors and be " \
@@ -297,8 +283,6 @@ class CallSite {
T(ConstructorIsGenerator, "Class constructor may not be a generator") \
T(DerivedConstructorReturn, \
"Derived constructors may only return object or undefined") \
- T(DuplicateArrawFunFormalParam, \
- "Arrow function may not have duplicate parameter names") \
T(DuplicateConstructor, "A class may only have one constructor") \
T(DuplicateExport, "Duplicate export of '%'") \
T(DuplicateProto, \
@@ -335,6 +319,7 @@ class CallSite {
T(ParamAfterRest, "Rest parameter must be last formal parameter") \
T(BadSetterRestParameter, \
"Setter function argument must not be a rest parameter") \
+ T(ParamDupe, "Duplicate parameter name not allowed in this context") \
T(ParenthesisInArgString, "Function arg string contains parenthesis") \
T(SingleFunctionLiteral, "Single function literal required") \
T(SloppyLexical, \
@@ -346,8 +331,6 @@ class CallSite {
"In strict mode code, functions can only be declared at top level or " \
"immediately within another function.") \
T(StrictOctalLiteral, "Octal literals are not allowed in strict mode.") \
- T(StrictParamDupe, \
- "Strict mode function may not have duplicate parameter names") \
T(StrictWith, "Strict mode code may not include a with statement") \
T(StrongArguments, \
"In strong mode, 'arguments' is deprecated, use '...args' instead") \
@@ -411,6 +394,7 @@ class CallSite {
T(UnexpectedTokenIdentifier, "Unexpected identifier") \
T(UnexpectedTokenNumber, "Unexpected number") \
T(UnexpectedTokenString, "Unexpected string") \
+ T(UnexpectedTokenRegExp, "Unexpected regular expression") \
T(UnknownLabel, "Undefined label '%'") \
T(UnterminatedArgList, "missing ) after argument list") \
T(UnterminatedRegExp, "Invalid regular expression: missing /") \
@@ -430,6 +414,8 @@ class MessageTemplate {
kLastMessage
};
+ static const char* TemplateString(int template_index);
+
static MaybeHandle<String> FormatMessage(int template_index,
Handle<String> arg0,
Handle<String> arg1,
@@ -456,8 +442,48 @@ class MessageHandler {
static void DefaultMessageReport(Isolate* isolate, const MessageLocation* loc,
Handle<Object> message_obj);
static Handle<String> GetMessage(Isolate* isolate, Handle<Object> data);
- static SmartArrayPointer<char> GetLocalizedMessage(Isolate* isolate,
- Handle<Object> data);
+ static base::SmartArrayPointer<char> GetLocalizedMessage(Isolate* isolate,
+ Handle<Object> data);
+};
+
+
+class ErrorToStringHelper {
+ public:
+ ErrorToStringHelper() : visited_(0) {}
+
+ MUST_USE_RESULT MaybeHandle<String> Stringify(Isolate* isolate,
+ Handle<JSObject> error);
+
+ private:
+ class VisitedScope {
+ public:
+ VisitedScope(ErrorToStringHelper* helper, Handle<JSObject> error)
+ : helper_(helper), has_visited_(false) {
+ for (const Handle<JSObject>& visited : helper->visited_) {
+ if (visited.is_identical_to(error)) {
+ has_visited_ = true;
+ break;
+ }
+ }
+ helper->visited_.Add(error);
+ }
+ ~VisitedScope() { helper_->visited_.RemoveLast(); }
+ bool has_visited() { return has_visited_; }
+
+ private:
+ ErrorToStringHelper* helper_;
+ bool has_visited_;
+ };
+
+ static bool ShadowsInternalError(Isolate* isolate,
+ LookupIterator* property_lookup,
+ LookupIterator* internal_error_lookup);
+
+ static MUST_USE_RESULT MaybeHandle<String> GetStringifiedProperty(
+ Isolate* isolate, LookupIterator* property_lookup,
+ Handle<String> default_value);
+
+ List<Handle<JSObject> > visited_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index d7ca7cd647..32766a89fe 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -5,21 +5,8 @@
// -------------------------------------------------------------------
var $errorToString;
-var $getStackTraceLine;
-var $messageGetPositionInLine;
-var $messageGetLineNumber;
-var $messageGetSourceLine;
-var $noSideEffectToString;
-var $stackOverflowBoilerplate;
+var $internalErrorSymbol;
var $stackTraceSymbol;
-var $toDetailString;
-var $Error;
-var $EvalError;
-var $RangeError;
-var $ReferenceError;
-var $SyntaxError;
-var $TypeError;
-var $URIError;
var MakeError;
var MakeEvalError;
var MakeRangeError;
@@ -35,24 +22,45 @@ var MakeURIError;
// -------------------------------------------------------------------
// Imports
+var ArrayJoin;
+var Bool16x8ToString;
+var Bool32x4ToString;
+var Bool8x16ToString;
+var Float32x4ToString;
+var FunctionSourceString;
var GlobalObject = global.Object;
+var Int16x8ToString;
+var Int32x4ToString;
+var Int8x16ToString;
var InternalArray = utils.InternalArray;
-var ObjectDefineProperty = utils.ObjectDefineProperty;
-
-var ArrayJoin;
+var ObjectDefineProperty;
var ObjectToString;
var StringCharAt;
var StringIndexOf;
var StringSubstring;
+var ToString;
utils.Import(function(from) {
ArrayJoin = from.ArrayJoin;
+ Bool16x8ToString = from.Bool16x8ToString;
+ Bool32x4ToString = from.Bool32x4ToString;
+ Bool8x16ToString = from.Bool8x16ToString;
+ Float32x4ToString = from.Float32x4ToString;
+ FunctionSourceString = from.FunctionSourceString;
+ Int16x8ToString = from.Int16x8ToString;
+ Int32x4ToString = from.Int32x4ToString;
+ Int8x16ToString = from.Int8x16ToString;
+ ObjectDefineProperty = from.ObjectDefineProperty;
ObjectToString = from.ObjectToString;
StringCharAt = from.StringCharAt;
StringIndexOf = from.StringIndexOf;
StringSubstring = from.StringSubstring;
});
+utils.ImportNow(function(from) {
+ ToString = from.ToString;
+});
+
// -------------------------------------------------------------------
var GlobalError;
@@ -65,9 +73,9 @@ var GlobalEvalError;
function NoSideEffectsObjectToString() {
- if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) return "[object Undefined]";
+ if (IS_UNDEFINED(this)) return "[object Undefined]";
if (IS_NULL(this)) return "[object Null]";
- return "[object " + %_ClassOf(TO_OBJECT_INLINE(this)) + "]";
+ return "[object " + %_ClassOf(TO_OBJECT(this)) + "]";
}
@@ -78,7 +86,7 @@ function NoSideEffectToString(obj) {
if (IS_UNDEFINED(obj)) return 'undefined';
if (IS_NULL(obj)) return 'null';
if (IS_FUNCTION(obj)) {
- var str = %_CallFunction(obj, obj, $functionSourceString);
+ var str = %_CallFunction(obj, obj, FunctionSourceString);
if (str.length > 128) {
str = %_SubString(str, 0, 111) + "...<omitted>..." +
%_SubString(str, str.length - 2, str.length);
@@ -86,6 +94,17 @@ function NoSideEffectToString(obj) {
return str;
}
if (IS_SYMBOL(obj)) return %_CallFunction(obj, $symbolToString);
+ if (IS_SIMD_VALUE(obj)) {
+ switch (typeof(obj)) {
+ case 'float32x4': return %_CallFunction(obj, Float32x4ToString);
+ case 'int32x4': return %_CallFunction(obj, Int32x4ToString);
+ case 'bool32x4': return %_CallFunction(obj, Bool32x4ToString);
+ case 'int16x8': return %_CallFunction(obj, Int16x8ToString);
+ case 'bool16x8': return %_CallFunction(obj, Bool16x8ToString);
+      case 'int8x16': return %_CallFunction(obj, Int8x16ToString);
+      case 'bool8x16': return %_CallFunction(obj, Bool8x16ToString);
+ }
+ }
if (IS_OBJECT(obj)
&& %GetDataProperty(obj, "toString") === ObjectToString) {
var constructor = %GetDataProperty(obj, "constructor");
@@ -133,7 +152,7 @@ function ToStringCheckErrorObject(obj) {
if (CanBeSafelyTreatedAsAnErrorObject(obj)) {
return %_CallFunction(obj, ErrorToString);
} else {
- return $toString(obj);
+ return ToString(obj);
}
}
@@ -153,8 +172,9 @@ function ToDetailString(obj) {
function MakeGenericError(constructor, type, arg0, arg1, arg2) {
- if (IS_UNDEFINED(arg0) && IS_STRING(type)) arg0 = [];
- return new constructor(FormatMessage(type, arg0, arg1, arg2));
+ var error = new constructor(FormatMessage(type, arg0, arg1, arg2));
+ error[$internalErrorSymbol] = true;
+ return error;
}
@@ -193,6 +213,16 @@ function GetLineNumber(message) {
}
+// Returns the offset of the given position within the containing line.
+function GetColumnNumber(message) {
+ var script = %MessageGetScript(message);
+ var start_position = %MessageGetStartPosition(message);
+ var location = script.locationFromPosition(start_position, true);
+ if (location == null) return -1;
+ return location.column;
+}
+
+
// Returns the source code line containing the given source
// position, or the empty string if the position is invalid.
function GetSourceLine(message) {
@@ -203,6 +233,7 @@ function GetSourceLine(message) {
return location.sourceText();
}
+
/**
* Find a line number given a specific source position.
* @param {number} position The source position.
@@ -536,17 +567,6 @@ utils.SetUpLockedPrototype(SourceSlice,
);
-// Returns the offset of the given position within the containing
-// line.
-function GetPositionInLine(message) {
- var script = %MessageGetScript(message);
- var start_position = %MessageGetStartPosition(message);
- var location = script.locationFromPosition(start_position, false);
- if (location == null) return -1;
- return start_position - location.start;
-}
-
-
function GetStackTraceLine(recv, fun, pos, isGlobal) {
return new CallSite(recv, fun, pos, false).toString();
}
@@ -953,7 +973,7 @@ function DefineError(global, f) {
// object. This avoids going through getters and setters defined
// on prototype objects.
if (!IS_UNDEFINED(m)) {
- %AddNamedProperty(this, 'message', $toString(m), DONT_ENUM);
+ %AddNamedProperty(this, 'message', ToString(m), DONT_ENUM);
}
} else {
return new f(m);
@@ -973,103 +993,27 @@ GlobalURIError = DefineError(global, function URIError() { });
%AddNamedProperty(GlobalError.prototype, 'message', '', DONT_ENUM);
-// Global list of error objects visited during ErrorToString. This is
-// used to detect cycles in error toString formatting.
-var visited_errors = new InternalArray();
-var cyclic_error_marker = new GlobalObject();
-
-function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
- var current = error;
- // Climb the prototype chain until we find the holder.
- while (current && !%HasOwnProperty(current, name)) {
- current = %_GetPrototype(current);
- }
- if (IS_NULL(current)) return UNDEFINED;
- if (!IS_OBJECT(current)) return error[name];
- // If the property is an accessor on one of the predefined errors that can be
- // generated statically by the compiler, don't touch it. This is to address
- // http://code.google.com/p/chromium/issues/detail?id=69187
- var desc = %GetOwnProperty(current, name);
- if (desc && desc[IS_ACCESSOR_INDEX]) {
- var isName = name === "name";
- if (current === GlobalReferenceError.prototype)
- return isName ? "ReferenceError" : UNDEFINED;
- if (current === GlobalSyntaxError.prototype)
- return isName ? "SyntaxError" : UNDEFINED;
- if (current === GlobalTypeError.prototype)
- return isName ? "TypeError" : UNDEFINED;
- }
- // Otherwise, read normally.
- return error[name];
-}
-
-function ErrorToStringDetectCycle(error) {
- if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
- try {
- var name = GetPropertyWithoutInvokingMonkeyGetters(error, "name");
- name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
- var message = GetPropertyWithoutInvokingMonkeyGetters(error, "message");
- message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
- if (name === "") return message;
- if (message === "") return name;
- return name + ": " + message;
- } finally {
- visited_errors.length = visited_errors.length - 1;
- }
-}
-
function ErrorToString() {
if (!IS_SPEC_OBJECT(this)) {
throw MakeTypeError(kCalledOnNonObject, "Error.prototype.toString");
}
- try {
- return ErrorToStringDetectCycle(this);
- } catch(e) {
- // If this error message was encountered already return the empty
- // string for it instead of recursively formatting it.
- if (e === cyclic_error_marker) {
- return '';
- }
- throw e;
- }
+ return %ErrorToStringRT(this);
}
utils.InstallFunctions(GlobalError.prototype, DONT_ENUM,
['toString', ErrorToString]);
$errorToString = ErrorToString;
-$getStackTraceLine = GetStackTraceLine;
-$messageGetPositionInLine = GetPositionInLine;
-$messageGetLineNumber = GetLineNumber;
-$messageGetSourceLine = GetSourceLine;
-$noSideEffectToString = NoSideEffectToString;
-$toDetailString = ToDetailString;
-
-$Error = GlobalError;
-$EvalError = GlobalEvalError;
-$RangeError = GlobalRangeError;
-$ReferenceError = GlobalReferenceError;
-$SyntaxError = GlobalSyntaxError;
-$TypeError = GlobalTypeError;
-$URIError = GlobalURIError;
MakeError = function(type, arg0, arg1, arg2) {
return MakeGenericError(GlobalError, type, arg0, arg1, arg2);
}
-MakeEvalError = function(type, arg0, arg1, arg2) {
- return MakeGenericError(GlobalEvalError, type, arg0, arg1, arg2);
-}
-
MakeRangeError = function(type, arg0, arg1, arg2) {
return MakeGenericError(GlobalRangeError, type, arg0, arg1, arg2);
}
-MakeReferenceError = function(type, arg0, arg1, arg2) {
- return MakeGenericError(GlobalReferenceError, type, arg0, arg1, arg2);
-}
-
MakeSyntaxError = function(type, arg0, arg1, arg2) {
return MakeGenericError(GlobalSyntaxError, type, arg0, arg1, arg2);
}
@@ -1084,8 +1028,8 @@ MakeURIError = function() {
// Boilerplate for exceptions for stack overflows. Used from
// Isolate::StackOverflow().
-$stackOverflowBoilerplate = MakeRangeError(kStackOverflow);
-%DefineAccessorPropertyUnchecked($stackOverflowBoilerplate, 'stack',
+var StackOverflowBoilerplate = MakeRangeError(kStackOverflow);
+%DefineAccessorPropertyUnchecked(StackOverflowBoilerplate, 'stack',
StackTraceGetter, StackTraceSetter,
DONT_ENUM);
@@ -1100,4 +1044,22 @@ captureStackTrace = function captureStackTrace(obj, cons_opt) {
GlobalError.captureStackTrace = captureStackTrace;
+utils.ExportToRuntime(function(to) {
+ to.Error = GlobalError;
+ to.EvalError = GlobalEvalError;
+ to.RangeError = GlobalRangeError;
+ to.ReferenceError = GlobalReferenceError;
+ to.SyntaxError = GlobalSyntaxError;
+ to.TypeError = GlobalTypeError;
+ to.URIError = GlobalURIError;
+ to.GetStackTraceLine = GetStackTraceLine;
+ to.NoSideEffectToString = NoSideEffectToString;
+ to.ToDetailString = ToDetailString;
+ to.MakeError = MakeGenericError;
+ to.MessageGetLineNumber = GetLineNumber;
+ to.MessageGetColumnNumber = GetColumnNumber;
+ to.MessageGetSourceLine = GetSourceLine;
+ to.StackOverflowBoilerplate = StackOverflowBoilerplate;
+});
+
});
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index bb422a3fcd..f4bddf5461 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -40,7 +40,7 @@
#include "src/mips/assembler-mips.h"
#include "src/assembler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
namespace v8 {
@@ -117,7 +117,7 @@ int FPURegister::ToAllocationIndex(FPURegister reg) {
// -----------------------------------------------------------------------------
// RelocInfo.
-void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+void RelocInfo::apply(intptr_t delta) {
if (IsCodeTarget(rmode_)) {
uint32_t scope1 = (uint32_t) target_address() & ~kImm28Mask;
uint32_t scope2 = reinterpret_cast<uint32_t>(pc_) & ~kImm28Mask;
@@ -195,11 +195,6 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
-Address Assembler::break_address_from_return_address(Address pc) {
- return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
-}
-
-
void Assembler::set_target_internal_reference_encoded_at(Address pc,
Address target) {
// Encoded internal references are lui/ori load of 32-bit abolute address.
@@ -359,22 +354,18 @@ void RelocInfo::set_code_age_stub(Code* stub,
}
-Address RelocInfo::call_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- // The pc_ offset of 0 assumes mips patched return sequence per
- // debug-mips.cc BreakLocation::SetDebugBreakAtReturn(), or
- // debug break slot per BreakLocation::SetDebugBreakAtSlot().
+Address RelocInfo::debug_call_address() {
+ // The pc_ offset of 0 assumes patched debug break slot or return
+ // sequence.
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
return Assembler::target_address_at(pc_, host_);
}
-void RelocInfo::set_call_address(Address target) {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- // The pc_ offset of 0 assumes mips patched return sequence per
- // debug-mips.cc BreakLocation::SetDebugBreakAtReturn(), or
- // debug break slot per BreakLocation::SetDebugBreakAtSlot().
+void RelocInfo::set_debug_call_address(Address target) {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ // The pc_ offset of 0 assumes patched debug break slot or return
+ // sequence.
Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -384,23 +375,6 @@ void RelocInfo::set_call_address(Address target) {
}
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-Object** RelocInfo::call_object_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -449,11 +423,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- isolate->debug()->has_break_points()) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
@@ -477,11 +448,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 2d1ac059a5..e219cc748a 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -32,9 +32,6 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/base/bits.h"
@@ -665,7 +662,7 @@ int Assembler::target_at(int pos, bool is_internal) {
// Check we have a branch or jump instruction.
DCHECK(IsBranch(instr) || IsLui(instr));
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
- // the compiler uses arithmectic shifts for signed integers.
+ // the compiler uses arithmetic shifts for signed integers.
if (IsBranch(instr)) {
int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
@@ -1406,11 +1403,11 @@ void Assembler::j(int32_t target) {
#if DEBUG
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
- (kImm26Bits + kImmFieldShift)) == 0;
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
DCHECK(in_range && ((target & 3) == 0));
#endif
- GenInstrJump(J, target >> 2);
+ GenInstrJump(J, (target >> 2) & kImm26Mask);
}
@@ -1432,12 +1429,12 @@ void Assembler::jal(int32_t target) {
#ifdef DEBUG
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
- (kImm26Bits + kImmFieldShift)) == 0;
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
DCHECK(in_range && ((target & 3) == 0));
#endif
positions_recorder()->WriteRecordedPositions();
- GenInstrJump(JAL, target >> 2);
+ GenInstrJump(JAL, (target >> 2) & kImm26Mask);
}
@@ -2826,10 +2823,10 @@ void Assembler::emit_code_stub_address(Code* stub) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(pc_, rmode, data, NULL);
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
+ if (rmode >= RelocInfo::COMMENT &&
+ rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsPosition(rmode));
// These modes do not need an entry in the constant pool.
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index a44a16837b..4db04b065f 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -525,9 +525,6 @@ class Assembler : public AssemblerBase {
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
- // Return the code target address of the patch debug break slot
- inline static Address break_address_from_return_address(Address pc);
-
static void JumpToJumpRegister(Address pc);
static void QuietNaN(HeapObject* nan);
@@ -575,25 +572,14 @@ class Assembler : public AssemblerBase {
// target and the return address.
static const int kCallTargetAddressOffset = 4 * kInstrSize;
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = 0;
-
// Distance between start of patched debug break slot and the emitted address
// to jump to.
- static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
+ static const int kPatchDebugBreakSlotAddressOffset = 4 * kInstrSize;
// Difference between address of current opcode and value read from pc
// register.
static const int kPcLoadDelta = 4;
- static const int kPatchDebugBreakSlotReturnOffset = 4 * kInstrSize;
-
- // Number of instructions used for the JS return sequence. The constant is
- // used by the debugger to patch the JS return sequence.
- static const int kJSReturnSequenceInstructions = 7;
- static const int kJSReturnSequenceLength =
- kJSReturnSequenceInstructions * kInstrSize;
static const int kDebugBreakSlotInstructions = 4;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
@@ -1058,11 +1044,11 @@ class Assembler : public AssemblerBase {
// Debugging.
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
+ // Mark generator continuation.
+ void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot();
+ void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index b5a67b47dc..b9607e05c4 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -2,16 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/codegen.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
@@ -316,39 +313,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
-static void Generate_Runtime_NewObject(MacroAssembler* masm,
- bool create_memento,
- Register original_constructor,
- Label* count_incremented,
- Label* allocated) {
- if (create_memento) {
- // Get the cell or allocation site.
- __ lw(a2, MemOperand(sp, 2 * kPointerSize));
- __ push(a2);
- }
-
- __ push(a1); // argument for Runtime_NewObject
- __ push(original_constructor); // original constructor
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ CallRuntime(Runtime::kNewObject, 2);
- }
- __ mov(t4, v0);
-
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- if (create_memento) {
- __ jmp(count_incremented);
- } else {
- __ jmp(allocated);
- }
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -364,53 +330,33 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Isolate* isolate = masm->isolate();
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
- if (create_memento) {
- __ AssertUndefinedOrAllocationSite(a2, t0);
- __ push(a2);
- }
-
// Preserve the incoming parameters on the stack.
+ __ AssertUndefinedOrAllocationSite(a2, t0);
__ SmiTag(a0);
- if (use_new_target) {
- __ Push(a0, a1, a3);
- } else {
- __ Push(a0, a1);
- }
-
- Label rt_call, allocated, normal_new, count_incremented;
- __ Branch(&normal_new, eq, a1, Operand(a3));
-
- // Original constructor and function are different.
- Generate_Runtime_NewObject(masm, create_memento, a3, &count_incremented,
- &allocated);
- __ bind(&normal_new);
+ __ Push(a2, a0, a1, a3);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
if (FLAG_inline_new) {
- Label undo_allocation;
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate);
__ li(a2, Operand(debug_step_in_fp));
__ lw(a2, MemOperand(a2));
__ Branch(&rt_call, ne, a2, Operand(zero_reg));
+ // Fall back to runtime if the original constructor and function differ.
+ __ Branch(&rt_call, ne, a1, Operand(a3));
+
// Load the initial map and verify that it is in fact a map.
// a1: constructor function
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(a2, &rt_call);
- __ GetObjectType(a2, a3, t4);
+ __ GetObjectType(a2, t5, t4);
__ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
// Check that the constructor is not constructing a JSFunction (see
@@ -418,8 +364,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map's instance type would be JS_FUNCTION_TYPE.
// a1: constructor function
// a2: initial map
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+ __ lbu(t5, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(&rt_call, eq, t5, Operand(JS_FUNCTION_TYPE));
if (!is_api_function) {
Label allocate;
@@ -446,12 +392,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Now allocate the JSObject on the heap.
// a1: constructor function
// a2: initial map
+ Label rt_call_reload_new_target;
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
if (create_memento) {
__ Addu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize));
}
- __ Allocate(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+ __ Allocate(a3, t4, t5, t6, &rt_call_reload_new_target, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
@@ -489,7 +436,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Operand(Map::kSlackTrackingCounterEnd));
// Allocate object with a slack.
- __ lbu(a0, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
+ __ lbu(
+ a0,
+ FieldMemOperand(
+ a2, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
+ __ lbu(a2, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ __ subu(a0, a0, a2);
__ sll(at, a0, kPointerSizeLog2);
__ addu(a0, t5, at);
// a0: offset of first field after pre-allocated fields
@@ -520,7 +472,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ sw(t7, MemOperand(t5));
__ Addu(t5, t5, kPointerSize);
// Load the AllocationSite.
- __ lw(t7, MemOperand(sp, 2 * kPointerSize));
+ __ lw(t7, MemOperand(sp, 3 * kPointerSize));
+ __ AssertUndefinedOrAllocationSite(a2, t0);
DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ sw(t7, MemOperand(t5));
__ Addu(t5, t5, kPointerSize);
@@ -531,110 +484,49 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
+ // and jump into the continuation code at any time from now on.
__ Addu(t4, t4, Operand(kHeapObjectTag));
- // Check if a non-empty properties array is needed. Continue with
- // allocated object if not; allocate and initialize a FixedArray if yes.
- // a1: constructor function
- // t4: JSObject
- // t5: start of next object (not tagged)
- __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields
- // and in-object properties.
- __ lbu(t6, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
- __ Addu(a3, a3, Operand(t6));
- __ lbu(t6, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
- __ subu(a3, a3, t6);
-
- // Done if no extra properties are to be allocated.
- __ Branch(&allocated, eq, a3, Operand(zero_reg));
- __ Assert(greater_equal, kPropertyAllocationCountFailed,
- a3, Operand(zero_reg));
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // a1: constructor
- // a3: number of elements in properties array
- // t4: JSObject
- // t5: start of next object
- __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ Allocate(
- a0,
- t5,
- t6,
- a2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // a1: constructor
- // a3: number of elements in properties array (untagged)
- // t4: JSObject
- // t5: start of FixedArray (untagged)
- __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
- __ mov(a2, t5);
- __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
- __ sll(a0, a3, kSmiTagSize);
- __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
- __ Addu(a2, a2, Operand(2 * kPointerSize));
-
- DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
- DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-
- // Initialize the fields to undefined.
- // a1: constructor
- // a2: First element of FixedArray (not tagged)
- // a3: number of elements in properties array
- // t4: JSObject
- // t5: FixedArray (not tagged)
- __ sll(t3, a3, kPointerSizeLog2);
- __ addu(t6, a2, t3); // End of object.
- DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- if (!is_api_function || create_memento) {
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
- __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t2));
- }
- __ InitializeFieldsWithFiller(a2, t6, t7);
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject.
- // a1: constructor function
- // t4: JSObject
- // t5: FixedArray (not tagged)
- __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
-
// Continue with JSObject being successfully allocated.
- // a1: constructor function
- // a4: JSObject
+ // t4: JSObject
__ jmp(&allocated);
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // t4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(t4, t5);
+ // Reload the original constructor and fall-through.
+ __ bind(&rt_call_reload_new_target);
+ __ lw(a3, MemOperand(sp, 0 * kPointerSize));
}
// Allocate the new receiver object using the runtime call.
// a1: constructor function
+ // a3: original constructor
__ bind(&rt_call);
- Generate_Runtime_NewObject(masm, create_memento, a1, &count_incremented,
- &allocated);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ lw(a2, MemOperand(sp, 3 * kPointerSize));
+ __ push(a2); // argument 1: allocation site
+ }
+
+ __ Push(a1, a3); // arguments 2-3 / 1-2
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ mov(t4, v0);
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
// Receiver for constructor call allocated.
// t4: JSObject
__ bind(&allocated);
if (create_memento) {
- int offset = (use_new_target ? 3 : 2) * kPointerSize;
- __ lw(a2, MemOperand(sp, offset));
+ __ lw(a2, MemOperand(sp, 3 * kPointerSize));
__ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
__ Branch(&count_incremented, eq, a2, Operand(t5));
// a2 is an AllocationSite. We are creating a memento from it, so we
@@ -648,20 +540,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore the parameters.
- if (use_new_target) {
- __ Pop(a3); // new.target
- }
+ __ Pop(a3); // new.target
__ Pop(a1);
// Retrieve smi-tagged arguments count from the stack.
__ lw(a0, MemOperand(sp));
__ SmiUntag(a0);
- if (use_new_target) {
- __ Push(a3, t4, t4);
- } else {
- __ Push(t4, t4);
- }
+ __ Push(a3, t4, t4);
// Set up pointer to last argument.
__ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -673,8 +559,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a3: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target (if used)
- // sp[2/3]: number of arguments (smi-tagged)
+ // sp[2]: new.target
+ // sp[3]: number of arguments (smi-tagged)
Label loop, entry;
__ SmiTag(a3, a0);
__ jmp(&entry);
@@ -701,9 +587,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- // TODO(arv): Remove the "!use_new_target" before supporting optimization
- // of functions that reference new.target
- if (!is_api_function && !use_new_target) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -718,8 +602,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// v0: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (if used)
- // sp[1/2]: number of arguments (smi-tagged)
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
__ JumpIfSmi(v0, &use_receiver);
// If the type of the result (stored in its map) is less than
@@ -737,10 +621,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&exit);
// v0: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (if used)
- // sp[1/2]: number of arguments (smi-tagged)
- int offset = (use_new_target ? 2 : 1) * kPointerSize;
- __ lw(a1, MemOperand(sp, offset));
+ // sp[1]: new.target (original constructor)
+ // sp[2]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, 2 * kPointerSize));
// Leave construct frame.
}
@@ -754,17 +637,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, true, false);
}
@@ -778,12 +656,12 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- // TODO(dslomov): support pretenuring
- CHECK(!FLAG_pretenuring_call_new);
-
{
FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+ __ AssertUndefinedOrAllocationSite(a2, t0);
+ __ push(a2);
+
__ mov(t0, a0);
__ SmiTag(t0);
__ push(t0); // Smi-tagged arguments count.
@@ -982,6 +860,148 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o a1: the JS function object being called.
+// o cp: our context
+// o fp: the caller's frame pointer
+// o sp: stack pointer
+// o ra: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-mips.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+
+ __ Push(ra, fp, cp, a1);
+ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+ // Get the bytecode array from the function object and load the pointer to the
+ // first entry into kInterpreterBytecodeRegister.
+ __ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister, t0);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
+ Operand(zero_reg));
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, t0, t0);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
+ Operand(BYTECODE_ARRAY_TYPE));
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ lw(t0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ Subu(t1, sp, Operand(t0));
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ __ Branch(&ok, hs, t1, Operand(a2));
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ Label loop_header;
+ Label loop_check;
+ __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ push(t1);
+ // Continue loop if not done.
+ __ bind(&loop_check);
+ __ Subu(t0, t0, Operand(kPointerSize));
+ __ Branch(&loop_header, ge, t0, Operand(zero_reg));
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(at));
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ bind(&ok);
+ }
+
+ // Load bytecode offset and dispatch table into registers.
+ __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ Subu(
+ kInterpreterRegisterFileRegister, fp,
+ Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ Addu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Dispatch to the first bytecode handler for the function.
+ __ Addu(a0, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ lbu(a0, MemOperand(a0));
+ __ sll(at, a0, kPointerSizeLog2);
+ __ Addu(at, kInterpreterDispatchTableRegister, at);
+ __ lw(at, MemOperand(at));
+  // TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
+ // and header removal.
+ __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(at);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // The return value is in accumulator, which is already in v0.
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Drop receiver + arguments.
+ __ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Jump(ra);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -1296,8 +1316,10 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ sll(a0, a0, kSmiTagSize); // Smi tagged.
- __ Push(a0, a2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ push(a0);
+ __ mov(a0, a2);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(a2, v0);
__ pop(a0);
@@ -1410,6 +1432,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int vectorOffset,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
@@ -1427,12 +1450,9 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ lw(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
- FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
- Handle<TypeFeedbackVector> feedback_vector =
- masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
- int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
- __ li(slot, Operand(Smi::FromInt(index)));
- __ li(vector, feedback_vector);
+ int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
+ __ li(slot, Operand(Smi::FromInt(slot_index)));
+ __ lw(vector, MemOperand(fp, vectorOffset));
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1466,6 +1486,13 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
const int kReceiverOffset = kArgumentsOffset + kPointerSize;
const int kFunctionOffset = kReceiverOffset + kPointerSize;
+ const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(a1);
__ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
__ push(a0);
@@ -1482,10 +1509,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
Generate_CheckStackOverflow(masm, kFunctionOffset, v0, kArgcIsSmiTagged);
// Push current limit and index.
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ mov(a1, zero_reg);
__ Push(v0, a1); // Limit and initial index.
@@ -1531,8 +1556,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
// Convert the receiver to a regular object.
// a0: receiver
__ bind(&call_to_object);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
__ Branch(&push_receiver);
@@ -1546,8 +1571,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ push(a0);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
@@ -1587,6 +1612,13 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+ const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(a1);
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
@@ -1611,33 +1643,28 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
Generate_CheckStackOverflow(masm, kFunctionOffset, v0, kArgcIsSmiTagged);
// Push current limit and index.
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ push(v0); // limit
__ mov(a1, zero_reg); // initial index
__ push(a1);
- // Push newTarget and callee functions
- __ lw(a0, MemOperand(fp, kNewTargetOffset));
- __ push(a0);
+ // Push the constructor function as callee.
__ lw(a0, MemOperand(fp, kFunctionOffset));
__ push(a0);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Use undefined feedback vector
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ lw(t0, MemOperand(fp, kNewTargetOffset));
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
// Leave internal frame.
}
__ jr(ra);
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 04aa17dfe2..211eaf9359 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/base/bits.h"
@@ -14,8 +12,8 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -33,7 +31,7 @@ static void InitializeArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -49,7 +47,7 @@ static void InitializeInternalArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -295,6 +293,8 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
@@ -309,6 +309,8 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics,
// since we need to throw a TypeError. Smis and heap numbers have
@@ -724,26 +726,30 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// a1 (rhs) second.
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- if (cc == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq && strict()) {
+ __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result.
- if (cc == lt || cc == le) {
- ncr = GREATER;
+ Builtins::JavaScript native;
+ if (cc == eq) {
+ native = Builtins::EQUALS;
} else {
- DCHECK(cc == gt || cc == ge); // Remaining cases.
- ncr = LESS;
+ native =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
+ int ncr; // NaN compare result.
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
+ } else {
+ DCHECK(cc == gt || cc == ge); // Remaining cases.
+ ncr = LESS;
+ }
+ __ li(a0, Operand(Smi::FromInt(ncr)));
+ __ push(a0);
}
- __ li(a0, Operand(Smi::FromInt(ncr)));
- __ push(a0);
- }
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -1673,7 +1679,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(a1);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments, 1, 1);
}
@@ -1939,10 +1945,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -2511,16 +2514,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
+ bool is_super) {
// a0 : number of arguments to the construct function
- // a2 : Feedback vector
+ // a2 : feedback vector
// a3 : slot in feedback vector (Smi)
// a1 : the function to call
+ // t0 : original constructor (for IsSuperConstructorCall)
FrameScope scope(masm, StackFrame::INTERNAL);
- const RegList kSavedRegs = 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7; // a3
+ const RegList kSavedRegs = 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7 | // a3
+ BoolToInt(is_super) << 8; // t0
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(a0);
@@ -2533,14 +2539,15 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
- // a2 : Feedback vector
+ // a2 : feedback vector
// a3 : slot in feedback vector (Smi)
+ // t0 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2548,23 +2555,23 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into t0.
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
- __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
+ // Load the cache state into t2.
+ __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t2, a2, Operand(t2));
+ __ lw(t2, FieldMemOperand(t2, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- // We don't know if t0 is a WeakCell or a Symbol, but it's harmless to read at
+ // We don't know if t2 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in type-feedback-vector.h).
Label check_allocation_site;
Register feedback_map = t1;
Register weak_value = t4;
- __ lw(weak_value, FieldMemOperand(t0, WeakCell::kValueOffset));
+ __ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset));
__ Branch(&done, eq, a1, Operand(weak_value));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&done, eq, t0, Operand(at));
- __ lw(feedback_map, FieldMemOperand(t0, HeapObject::kMapOffset));
+ __ Branch(&done, eq, t2, Operand(at));
+ __ lw(feedback_map, FieldMemOperand(t2, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kWeakCellMapRootIndex);
__ Branch(FLAG_pretenuring_call_new ? &miss : &check_allocation_site, ne,
feedback_map, Operand(at));
@@ -2583,8 +2590,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ Branch(&miss, ne, feedback_map, Operand(at));
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
- __ Branch(&megamorphic, ne, a1, Operand(t0));
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
+ __ Branch(&megamorphic, ne, a1, Operand(t2));
__ jmp(&done);
}
@@ -2593,35 +2600,35 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
- __ Branch(&initialize, eq, t0, Operand(at));
+ __ Branch(&initialize, eq, t2, Operand(at));
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
+ __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t2, a2, Operand(t2));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
+ __ sw(at, FieldMemOperand(t2, FixedArray::kHeaderSize));
__ jmp(&done);
// An uninitialized cache is patched with the function.
__ bind(&initialize);
if (!FLAG_pretenuring_call_new) {
// Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
- __ Branch(&not_array_function, ne, a1, Operand(t0));
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
+ __ Branch(&not_array_function, ne, a1, Operand(t2));
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ Branch(&done);
__ bind(&not_array_function);
}
CreateWeakCellStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ bind(&done);
}
@@ -2670,8 +2677,10 @@ static void EmitSlowCase(MacroAssembler* masm,
static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{ FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a3);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Push(a1);
+ __ mov(a0, a3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ pop(a1);
}
__ Branch(USE_DELAY_SLOT, cont);
@@ -2743,17 +2752,18 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
- // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
+ // a3 : slot in feedback vector (Smi, for RecordCallTarget)
+ // t0 : original constructor (for IsSuperConstructorCall)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(a1, &non_function_call);
// Check that the function is a JSFunction.
- __ GetObjectType(a1, t0, t0);
- __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
+ __ GetObjectType(a1, t1, t1);
+ __ Branch(&slow, ne, t1, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(t1, a2, at);
@@ -2778,11 +2788,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// Pass function as original constructor.
if (IsSuperConstructorCall()) {
- __ li(t0, Operand(1 * kPointerSize));
- __ sll(at, a0, kPointerSizeLog2);
- __ Addu(t0, t0, Operand(at));
- __ Addu(at, sp, Operand(t0));
- __ lw(a3, MemOperand(at, 0));
+ __ mov(a3, t0);
} else {
__ mov(a3, a1);
}
@@ -2797,10 +2803,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a0: number of arguments
// a1: called object
- // t0: object type
+ // t1: object type
Label do_call;
__ bind(&slow);
- __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Branch(&non_function_call, ne, t1, Operand(JS_FUNCTION_PROXY_TYPE));
__ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
@@ -3035,11 +3041,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(a1, a2, a3);
// Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
-
- ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
- __ CallExternalReference(miss, 3);
+ Runtime::FunctionId id = GetICState() == DEFAULT
+ ? Runtime::kCallIC_Miss
+ : Runtime::kCallIC_Customization_Miss;
+ __ CallRuntime(id, 3);
// Move result to a1 and exit the internal frame.
__ mov(a1, v0);
@@ -3161,11 +3166,9 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
- __ And(t0,
- code_,
- Operand(kSmiTagMask |
- ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
+ __ And(t0, code_, Operand(kSmiTagMask |
+ ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
__ Branch(&slow_case_, ne, t0, Operand(zero_reg));
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
@@ -3449,7 +3452,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
__ bind(&single_char);
// v0: original string
@@ -3643,7 +3646,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -3953,7 +3956,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ bind(&miss);
@@ -4003,14 +4006,13 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a0);
__ Push(ra, a1, a0);
__ li(t0, Operand(Smi::FromInt(op())));
__ addiu(sp, sp, -kPointerSize);
- __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
+ __ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
+ USE_DELAY_SLOT);
__ sw(t0, MemOperand(sp)); // In the delay slot.
// Compute the entry point of the rewritten stub.
__ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4086,7 +4088,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
- DCHECK_EQ(kSmiTagSize, 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
Register tmp = properties;
__ sll(scratch0, index, 1);
__ Addu(tmp, properties, scratch0);
@@ -4176,7 +4178,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ And(scratch2, scratch1, scratch2);
// Scale the index by multiplying by the element size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ sll(at, scratch2, 1);
@@ -4263,14 +4265,14 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ And(index, mask, index);
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// index *= 3.
__ mov(at, index);
__ sll(index, index, 1);
__ Addu(index, index, at);
- DCHECK_EQ(kSmiTagSize, 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
__ sll(index, index, 2);
__ Addu(index, index, dictionary);
__ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
@@ -4752,7 +4754,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- false, receiver, name, feedback,
+ receiver, name, feedback,
receiver_map, scratch1, t5);
__ bind(&miss);
@@ -4994,12 +4996,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// sp[0] - last argument
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
- DCHECK(FAST_SMI_ELEMENTS == 0);
- DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
- DCHECK(FAST_ELEMENTS == 2);
- DCHECK(FAST_HOLEY_ELEMENTS == 3);
- DCHECK(FAST_DOUBLE_ELEMENTS == 4);
- DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
__ And(at, a3, Operand(1));
@@ -5272,6 +5274,152 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context_reg = cp;
+ Register slot_reg = a2;
+ Register result_reg = v0;
+ Label slow_case;
+
+ // Go up context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ lw(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ context_reg = result_reg;
+ }
+
+ // Load the PropertyCell value at the specified slot.
+ __ sll(at, slot_reg, kPointerSizeLog2);
+ __ Addu(at, at, Operand(context_reg));
+ __ lw(result_reg, ContextOperand(at, 0));
+ __ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
+
+ // Check that value is not the_hole.
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&slow_case, eq, result_reg, Operand(at));
+ __ Ret();
+
+ // Fallback to the runtime.
+ __ bind(&slow_case);
+ __ SmiTag(slot_reg);
+ __ Push(slot_reg);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+}
+
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context_reg = cp;
+ Register slot_reg = a2;
+ Register value_reg = a0;
+ Register cell_reg = t0;
+ Register cell_value_reg = t1;
+ Register cell_details_reg = t2;
+ Label fast_heapobject_case, fast_smi_case, slow_case;
+
+ if (FLAG_debug_code) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
+ }
+
+ // Go up context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ lw(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ context_reg = cell_reg;
+ }
+
+ // Load the PropertyCell at the specified slot.
+ __ sll(at, slot_reg, kPointerSizeLog2);
+ __ Addu(at, at, Operand(context_reg));
+ __ lw(cell_reg, ContextOperand(at, 0));
+
+ // Load PropertyDetails for the cell (actually only the cell_type and kind).
+ __ lw(cell_details_reg,
+ FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
+ __ SmiUntag(cell_details_reg);
+ __ And(cell_details_reg, cell_details_reg,
+ PropertyDetails::PropertyCellTypeField::kMask |
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask);
+
+ // Check if PropertyCell holds mutable data.
+ Label not_mutable_data;
+ __ Branch(&not_mutable_data, ne, cell_details_reg,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kMutable) |
+ PropertyDetails::KindField::encode(kData)));
+ __ JumpIfSmi(value_reg, &fast_smi_case);
+ __ bind(&fast_heapobject_case);
+ __ sw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
+ __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
+ cell_details_reg, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ // RecordWriteField clobbers the value register, so we need to reload.
+ __ Ret(USE_DELAY_SLOT);
+ __ lw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
+ __ bind(&not_mutable_data);
+
+ // Check if PropertyCell value matches the new value (relevant for Constant,
+ // ConstantType and Undefined cells).
+ Label not_same_value;
+ __ lw(cell_value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
+ __ Branch(&not_same_value, ne, value_reg, Operand(cell_value_reg));
+ // Make sure the PropertyCell is not marked READ_ONLY.
+ __ And(at, cell_details_reg, PropertyDetails::kAttributesReadOnlyMask);
+ __ Branch(&slow_case, ne, at, Operand(zero_reg));
+ if (FLAG_debug_code) {
+ Label done;
+ // This can only be true for Constant, ConstantType and Undefined cells,
+ // because we never store the_hole via this stub.
+ __ Branch(&done, eq, cell_details_reg,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstant) |
+ PropertyDetails::KindField::encode(kData)));
+ __ Branch(&done, eq, cell_details_reg,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ Check(eq, kUnexpectedValue, cell_details_reg,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kUndefined) |
+ PropertyDetails::KindField::encode(kData)));
+ __ bind(&done);
+ }
+ __ Ret();
+ __ bind(&not_same_value);
+
+ // Check if PropertyCell contains data with constant type (and is not
+ // READ_ONLY).
+ __ Branch(&slow_case, ne, cell_details_reg,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+
+ // Now either both old and new values must be SMIs or both must be heap
+ // objects with same map.
+ Label value_is_heap_object;
+ __ JumpIfNotSmi(value_reg, &value_is_heap_object);
+ __ JumpIfNotSmi(cell_value_reg, &slow_case);
+ // Old and new values are SMIs, no need for a write barrier here.
+ __ bind(&fast_smi_case);
+ __ Ret(USE_DELAY_SLOT);
+ __ sw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
+ __ bind(&value_is_heap_object);
+ __ JumpIfSmi(cell_value_reg, &slow_case);
+ Register cell_value_map_reg = cell_value_reg;
+ __ lw(cell_value_map_reg,
+ FieldMemOperand(cell_value_reg, HeapObject::kMapOffset));
+ __ Branch(&fast_heapobject_case, eq, cell_value_map_reg,
+ FieldMemOperand(value_reg, HeapObject::kMapOffset));
+
+ // Fallback to the runtime.
+ __ bind(&slow_case);
+ __ SmiTag(slot_reg);
+ __ Push(slot_reg, value_reg);
+ __ TailCallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2, 1);
+}
+
+
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 460f726e07..67228e0170 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -2,8 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_CODE_STUBS_ARM_H_
-#define V8_MIPS_CODE_STUBS_ARM_H_
+#ifndef V8_MIPS_CODE_STUBS_MIPS_H_
+#define V8_MIPS_CODE_STUBS_MIPS_H_
+
+#include "src/mips/frames-mips.h"
namespace v8 {
namespace internal {
@@ -343,4 +345,4 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
} } // namespace v8::internal
-#endif // V8_MIPS_CODE_STUBS_ARM_H_
+#endif // V8_MIPS_CODE_STUBS_MIPS_H_
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 23780d8b25..6ef8fc6beb 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/codegen.h"
@@ -1193,10 +1191,9 @@ CodeAgingHelper::CodeAgingHelper() {
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before MIPS simulator ICache is setup.
- SmartPointer<CodePatcher> patcher(
- new CodePatcher(young_sequence_.start(),
- young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ base::SmartPointer<CodePatcher> patcher(new CodePatcher(
+ young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->Push(ra, fp, cp, a1);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index b02ec4ff10..f79ad4e41c 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -14,9 +14,6 @@ namespace v8 {
namespace internal {
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index 0ef64f508d..40d497fd99 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/mips/constants-mips.h"
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
index acc7af28e5..dff1d30402 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -11,8 +11,6 @@
#include <asm/cachectl.h>
#endif // #ifdef __mips
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/assembler.h"
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
deleted file mode 100644
index 896309a36e..0000000000
--- a/deps/v8/src/mips/debug-mips.cc
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/codegen.h"
-#include "src/debug.h"
-
-namespace v8 {
-namespace internal {
-
-void BreakLocation::SetDebugBreakAtReturn() {
- // Mips return sequence:
- // mov sp, fp
- // lw fp, sp(0)
- // lw ra, sp(4)
- // addiu sp, sp, 8
- // addiu sp, sp, N
- // jr ra
- // nop (in branch delay slot)
-
- // Make sure this constant matches the number if instrucntions we emit.
- DCHECK(Assembler::kJSReturnSequenceInstructions == 7);
- CodePatcher patcher(pc(), Assembler::kJSReturnSequenceInstructions);
- // li and Call pseudo-instructions emit two instructions each.
- patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
- debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry())));
- patcher.masm()->Call(v8::internal::t9);
- patcher.masm()->nop();
- patcher.masm()->nop();
- patcher.masm()->nop();
-
- // TODO(mips): Open issue about using breakpoint instruction instead of nops.
- // patcher.masm()->bkpt(0);
-}
-
-
-void BreakLocation::SetDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- // Patch the code changing the debug break slot code from:
- // nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
- // nop(DEBUG_BREAK_NOP)
- // nop(DEBUG_BREAK_NOP)
- // nop(DEBUG_BREAK_NOP)
- // to a call to the debug break slot code.
- // li t9, address (lui t9 / ori t9 instruction pair)
- // call t9 (jalr t9 / nop instruction pair)
- CodePatcher patcher(pc(), Assembler::kDebugBreakSlotInstructions);
- patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
- debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry())));
- patcher.masm()->Call(v8::internal::t9);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
- __ Subu(sp, sp,
- Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize));
- for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) {
- __ sw(at, MemOperand(sp, kPointerSize * i));
- }
- __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
- __ push(at);
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- DCHECK((object_regs & ~kJSCallerSaved) == 0);
- DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
- DCHECK((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ And(at, reg, 0xc0000000);
- __ Assert(eq, kUnableToEncodeValueAsSmi, at, Operand(zero_reg));
- }
- __ sll(reg, reg, kSmiTagSize);
- }
- }
- __ MultiPush(object_regs | non_object_regs);
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ PrepareCEntryArgs(0); // No arguments.
- __ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
-
- CEntryStub ceb(masm->isolate(), 1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ MultiPop(object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ srl(reg, reg, kSmiTagSize);
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ li(reg, kDebugZapValue);
- }
- }
- }
-
- // Don't bother removing padding bytes pushed on the stack
- // as the frame is going to be restored right away.
-
- // Leave the internal frame.
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ li(t9, Operand(after_break_target));
- __ lw(t9, MemOperand(t9));
- __ Jump(t9);
-}
-
-
-void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallICStub
- // ----------- S t a t e -------------
- // -- a1 : function
- // -- a3 : slot in feedback array (smi)
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a3.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // In places other than IC call sites it is expected that v0 is TOS which
- // is an object - this is not generally the case so this should be used with
- // care.
- Generate_DebugBreakCallHelper(masm, v0.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-mips.cc).
- // ----------- S t a t e -------------
- // -- a1 : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-mips.cc).
- // ----------- S t a t e -------------
- // -- a0 : number of arguments (not smi)
- // -- a1 : constructor function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() , a0.bit());
-}
-
-
-void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
- MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-mips.cc).
- // ----------- S t a t e -------------
- // -- a0 : number of arguments (not smi)
- // -- a1 : constructor function
- // -- a2 : feedback array
- // -- a3 : feedback slot (smi)
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), a0.bit());
-}
-
-
-void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction. Avoid emitting
- // the trampoline pool in the debug break slot code.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
- __ nop(MacroAssembler::DEBUG_BREAK_NOP);
- }
- DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
- masm->InstructionsGeneratedSince(&check_codesize));
-}
-
-
-void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0);
-}
-
-
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ li(at, Operand(restarter_frame_function_slot));
- __ sw(zero_reg, MemOperand(at, 0));
-
- // We do not know our frame height, but set sp based on fp.
- __ Subu(sp, fp, Operand(kPointerSize));
-
- __ Pop(ra, fp, a1); // Return address, Frame, Function.
-
- // Load context from the function.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Get function code.
- __ lw(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
- __ Addu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Re-run JSFunction, a1 is function, cp is context.
- __ Jump(t9);
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 69e8514f67..974692495a 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -1,13 +1,10 @@
-
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/safepoint-table.h"
namespace v8 {
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 48427c5455..6028e90b44 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -22,14 +22,11 @@
// of code into a FILE*, meaning that the above functionality could also be
// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
-
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/base/platform/platform.h"
@@ -352,8 +349,10 @@ void Decoder::PrintPCImm21(Instruction* instr, int delta_pc, int n_bits) {
// Print 26-bit hex immediate value.
void Decoder::PrintXImm26(Instruction* instr) {
- uint32_t imm = instr->Imm26Value() << kImmFieldShift;
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+ uint32_t target = static_cast<uint32_t>(instr->Imm26Value())
+ << kImmFieldShift;
+ target = (reinterpret_cast<uint32_t>(instr) & ~0xfffffff) | target;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", target);
}
@@ -988,7 +987,7 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
Format(instr, "jr 'rs");
break;
case JALR:
- Format(instr, "jalr 'rs");
+ Format(instr, "jalr 'rs, 'rd");
break;
case SLL:
if (0x0 == static_cast<int>(instr->InstructionBits()))
@@ -1346,9 +1345,13 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
case BGEZ:
Format(instr, "bgez 'rs, 'imm16u -> 'imm16p4s2");
break;
- case BGEZAL:
- Format(instr, "bgezal 'rs, 'imm16u -> 'imm16p4s2");
+ case BGEZAL: {
+ if (instr->RsValue() == 0)
+ Format(instr, "bal 'imm16s -> 'imm16p4s2");
+ else
+ Format(instr, "bgezal 'rs, 'imm16u -> 'imm16p4s2");
break;
+ }
case BGEZALL:
Format(instr, "bgezall 'rs, 'imm16u -> 'imm16p4s2");
break;
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
index 3e6293e2e5..5350239d6d 100644
--- a/deps/v8/src/mips/frames-mips.cc
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/assembler.h"
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index f0b734db1c..0452ece222 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -169,12 +169,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
} } // namespace v8::internal
#endif
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 0379da0ad8..1afc3f2b29 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/interface-descriptors.h"
@@ -36,7 +34,11 @@ const Register VectorStoreICDescriptor::VectorRegister() { return a3; }
const Register StoreTransitionDescriptor::MapRegister() { return a3; }
-const Register ElementTransitionAndStoreDescriptor::MapRegister() { return a3; }
+const Register LoadGlobalViaContextDescriptor::SlotRegister() { return a2; }
+
+
+const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
+const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
const Register InstanceofDescriptor::left() { return a0; }
@@ -62,6 +64,14 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+void StoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2};
@@ -83,6 +93,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
}
+// static
+const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
+
+
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a0};
@@ -158,11 +172,11 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
- // a3 : (only if a2 is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // a3 : slot in feedback vector (Smi, for RecordCallTarget)
+ // t0 : original constructor (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {a0, a1, a2};
+ Register registers[] = {a0, a1, t0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
@@ -330,11 +344,22 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
+void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a1, // math rounding function
+ a3, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void MathRoundVariantCallFromOptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
a1, // math rounding function
a3, // vector slot id
+ a2, // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 5c26001ef0..768531fce3 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
-
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -130,7 +128,7 @@ bool LCodeGen::GeneratePrologue() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop_at");
}
#endif
@@ -424,6 +422,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
+ AllowDeferredHandleDereference get_number;
DCHECK(literal->IsNumber());
__ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
} else if (r.IsSmi()) {
@@ -646,15 +645,23 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
if (op->IsStackSlot()) {
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
if (is_tagged) {
- translation->StoreStackSlot(op->index());
+ translation->StoreStackSlot(index);
} else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
+ translation->StoreUint32StackSlot(index);
} else {
- translation->StoreInt32StackSlot(op->index());
+ translation->StoreInt32StackSlot(index);
}
} else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
+ translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -2180,6 +2187,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
}
+ if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ // SIMD value -> true.
+ const Register scratch = scratch1();
+ __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(instr->TrueLabel(chunk_), eq, scratch,
+ Operand(SIMD128_VALUE_TYPE));
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
@@ -2879,13 +2894,31 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
- PREMONOMORPHIC).code();
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+ SLOPPY, PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->result()).is(v0));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub =
+ CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2982,7 +3015,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3084,10 +3117,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
? (element_size_shift - kSmiTagSize) : element_size_shift;
int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
FPURegister result = ToDoubleRegister(instr->result());
if (key_is_constant) {
__ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
@@ -3095,8 +3125,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
}
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
__ lwc1(result, MemOperand(scratch0(), base_offset));
__ cvt_d_s(result, result);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
@@ -3108,29 +3137,22 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
key, external_pointer, key_is_constant, constant_key,
element_size_shift, shift_size, base_offset);
switch (elements_kind) {
- case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
__ lb(result, mem_operand);
break;
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ lbu(result, mem_operand);
break;
- case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
__ lh(result, mem_operand);
break;
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
__ lhu(result, mem_operand);
break;
- case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
__ lw(result, mem_operand);
break;
- case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
@@ -3140,8 +3162,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -3253,7 +3273,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3507,9 +3527,8 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
__ li(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
- // The context is the first argument.
- __ Push(cp, scratch0(), scratch1());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ __ Push(scratch0(), scratch1());
+ CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}
@@ -4179,6 +4198,30 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->value())
+ .is(StoreGlobalViaContextDescriptor::ValueRegister()));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
+ isolate(), depth, instr->language_mode())
+ .code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
Operand operand(0);
@@ -4221,10 +4264,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
? (element_size_shift - kSmiTagSize) : element_size_shift;
int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
Register address = scratch0();
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
@@ -4239,8 +4279,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
__ Addu(address, external_pointer, address);
}
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
__ swc1(double_scratch0(), MemOperand(address, base_offset));
} else { // Storing doubles, not floats.
@@ -4253,30 +4292,21 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
element_size_shift, shift_size,
base_offset);
switch (elements_kind) {
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_INT8_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case INT8_ELEMENTS:
__ sb(value, mem_operand);
break;
- case EXTERNAL_INT16_ELEMENTS:
- case EXTERNAL_UINT16_ELEMENTS:
case INT16_ELEMENTS:
case UINT16_ELEMENTS:
__ sh(value, mem_operand);
break;
- case EXTERNAL_INT32_ELEMENTS:
- case EXTERNAL_UINT32_ELEMENTS:
case INT32_ELEMENTS:
case UINT32_ELEMENTS:
__ sw(value, mem_operand);
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -4390,7 +4420,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases: external, fast double
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -5651,15 +5681,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, input, scratch);
- __ Branch(USE_DELAY_SLOT, false_label,
- ge, scratch, Operand(FIRST_NONSTRING_TYPE));
- // input is an object so we can load the BitFieldOffset even if we take the
- // other branch.
- __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
- __ And(at, at, 1 << Map::kIsUndetectable);
- *cmp1 = at;
- *cmp2 = Operand(zero_reg);
- final_branch_condition = eq;
+ *cmp1 = scratch;
+ *cmp2 = Operand(FIRST_NONSTRING_TYPE);
+ final_branch_condition = lt;
} else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label);
@@ -5717,6 +5741,19 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
*cmp2 = Operand(zero_reg);
final_branch_condition = eq;
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(type_name, factory->type##_string())) { \
+ __ JumpIfSmi(input, false_label); \
+ __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); \
+ __ LoadRoot(at, Heap::k##Type##MapRootIndex); \
+ *cmp1 = input; \
+ *cmp2 = Operand(at); \
+ final_branch_condition = eq;
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+
} else {
*cmp1 = at;
*cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
index df92ab9f5c..cdaf2463a0 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/mips/lithium-codegen-mips.h"
#include "src/mips/lithium-gap-resolver-mips.h"
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.h b/deps/v8/src/mips/lithium-gap-resolver-mips.h
index ab950559c5..7374da7727 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.h
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.h
@@ -5,8 +5,6 @@
#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-#include "src/v8.h"
-
#include "src/lithium.h"
namespace v8 {
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index fd90584ea9..6cd3410645 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -4,8 +4,6 @@
#include <sstream>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/hydrogen-osr.h"
@@ -337,6 +335,11 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
+void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d", depth(), slot_index());
+}
+
+
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -355,6 +358,12 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
+ value()->PrintTo(stream);
+}
+
+
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -1608,8 +1617,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsExternal()) {
- DCHECK(instr->left()->representation().IsExternal());
- DCHECK(instr->right()->representation().IsInteger32());
+ DCHECK(instr->IsConsistentExternalRepresentation());
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
@@ -2097,6 +2105,15 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
+ HLoadGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ DCHECK(instr->slot_index() > 0);
+ LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2165,7 +2182,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LInstruction* result = NULL;
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseRegister(instr->elements());
@@ -2185,10 +2202,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
bool needs_environment;
- if (instr->is_external() || instr->is_fixed_typed_array()) {
+ if (instr->is_fixed_typed_array()) {
// see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
- elements_kind == UINT32_ELEMENTS) &&
+ needs_environment = elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32);
} else {
// see LCodeGen::DoLoadKeyedFixedDoubleArray and
@@ -2223,7 +2239,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
@@ -2255,10 +2271,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
- DCHECK((instr->is_fixed_typed_array() &&
- instr->elements()->representation().IsTagged()) ||
- (instr->is_external() &&
- instr->elements()->representation().IsExternal()));
+ DCHECK(instr->elements()->representation().IsExternal());
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
@@ -2384,6 +2397,19 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
+ HStoreGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* value = UseFixed(instr->value(),
+ StoreGlobalViaContextDescriptor::ValueRegister());
+ DCHECK(instr->slot_index() > 0);
+
+ LStoreGlobalViaContext* result =
+ new (zone()) LStoreGlobalViaContext(context, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 82c5ffd29d..2998219892 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -102,6 +102,7 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
+ V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -142,6 +143,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
+ V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1608,15 +1610,9 @@ class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- bool is_external() const {
- return hydrogen()->is_external();
- }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1663,7 +1659,23 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
+ TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ LOperand* context() { return inputs_[0]; }
+
+ int depth() const { return hydrogen()->depth(); }
+ int slot_index() const { return hydrogen()->slot_index(); }
};
@@ -2168,6 +2180,28 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
+class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreGlobalViaContext(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
+ "store-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int depth() { return hydrogen()->depth(); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
@@ -2176,13 +2210,9 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
inputs_[2] = value;
}
- bool is_external() const { return hydrogen()->is_external(); }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index f554b0c1ef..224bc5c7f4 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -4,8 +4,6 @@
#include <limits.h> // For LONG_MIN, LONG_MAX.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/base/bits.h"
@@ -13,7 +11,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -164,6 +162,9 @@ void MacroAssembler::InNewSpace(Register object,
}
+// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
+// The register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
void MacroAssembler::RecordWriteField(
Register object,
int offset,
@@ -217,8 +218,7 @@ void MacroAssembler::RecordWriteField(
}
-// Will clobber 4 registers: object, map, dst, ip. The
-// register 'object' contains a heap object pointer.
+// Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
void MacroAssembler::RecordWriteForMap(Register object,
Register map,
Register dst,
@@ -292,8 +292,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
}
-// Will clobber 4 registers: object, address, scratch, ip. The
-// register 'object' contains a heap object pointer. The heap object
+// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
+// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
Register object,
@@ -3287,10 +3287,11 @@ void MacroAssembler::Push(Handle<Object> handle) {
void MacroAssembler::DebugBreak() {
PrepareCEntryArgs(0);
- PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
+ PrepareCEntryFunction(
+ ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
CEntryStub ces(isolate(), 1);
DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
@@ -3514,26 +3515,6 @@ void MacroAssembler::Allocate(Register object_size,
}
-void MacroAssembler::UndoAllocationInNewSpace(Register object,
- Register scratch) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- And(object, object, Operand(~kHeapObjectTagMask));
-#ifdef DEBUG
- // Check that the object un-allocated is below the current top.
- li(scratch, Operand(new_space_allocation_top));
- lw(scratch, MemOperand(scratch));
- Check(less, kUndoAllocationOfNonAllocatedMemory,
- object, Operand(scratch));
-#endif
- // Write the address of the object to un-allocate as the current top.
- li(scratch, Operand(new_space_allocation_top));
- sw(object, MemOperand(scratch));
-}
-
-
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -4579,9 +4560,9 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments,
- SaveFPRegsMode save_doubles) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles,
+ BranchDelaySlot bd) {
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -4596,7 +4577,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference(f, isolate()));
CEntryStub stub(isolate(), 1, save_doubles);
- CallStub(&stub);
+ CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
}
@@ -6005,19 +5986,28 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
DCHECK(!scratch1.is(scratch0));
Factory* factory = isolate()->factory();
Register current = scratch0;
- Label loop_again;
+ Label loop_again, end;
// Scratch contained elements pointer.
Move(current, object);
+ lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ Branch(&end, eq, current, Operand(factory->null_value()));
// Loop based on the map going up the prototype chain.
bind(&loop_again);
lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+ Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
Branch(&loop_again, ne, current, Operand(factory->null_value()));
+
+ bind(&end);
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 22d12b430b..995c082119 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -12,6 +12,19 @@
namespace v8 {
namespace internal {
+// Give alias names to registers for calling conventions.
+const Register kReturnRegister0 = {kRegister_v0_Code};
+const Register kReturnRegister1 = {kRegister_v1_Code};
+const Register kJSFunctionRegister = {kRegister_a1_Code};
+const Register kContextRegister = {Register::kCpRegister};
+const Register kInterpreterAccumulatorRegister = {kRegister_v0_Code};
+const Register kInterpreterRegisterFileRegister = {kRegister_t3_Code};
+const Register kInterpreterBytecodeOffsetRegister = {kRegister_t4_Code};
+const Register kInterpreterBytecodeArrayRegister = {kRegister_t5_Code};
+const Register kInterpreterDispatchTableRegister = {kRegister_t6_Code};
+const Register kRuntimeCallFunctionRegister = {kRegister_a1_Code};
+const Register kRuntimeCallArgCountRegister = {kRegister_a0_Code};
+
// Forward declaration.
class JumpTarget;
@@ -509,13 +522,6 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. The caller must make sure that no pointers
- // are left to the object(s) no longer allocated as they would be invalid when
- // allocation is undone.
- void UndoAllocationInNewSpace(Register object, Register scratch);
-
-
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -681,6 +687,17 @@ class MacroAssembler: public Assembler {
sw(src4, MemOperand(sp, 0 * kPointerSize));
}
+ // Push five registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ Subu(sp, sp, Operand(5 * kPointerSize));
+ sw(src1, MemOperand(sp, 4 * kPointerSize));
+ sw(src2, MemOperand(sp, 3 * kPointerSize));
+ sw(src3, MemOperand(sp, 2 * kPointerSize));
+ sw(src4, MemOperand(sp, 1 * kPointerSize));
+ sw(src5, MemOperand(sp, 0 * kPointerSize));
+ }
+
void Push(Register src, Condition cond, Register tst1, Register tst2) {
// Since we don't have conditional execution we use a Branch.
Branch(3, cond, tst1, Operand(tst2));
@@ -1228,19 +1245,19 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void CallJSExitStub(CodeStub* stub);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f,
- int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+ BranchDelaySlot bd = PROTECT);
void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+ BranchDelaySlot bd = PROTECT) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles, bd);
}
// Convenience function: call an external reference.
@@ -1710,7 +1727,7 @@ class CodePatcher {
CodePatcher(byte* address,
int instructions,
FlushICache flush_cache = FLUSH);
- virtual ~CodePatcher();
+ ~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 83b5905cd0..6dea3f09a3 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -7,8 +7,6 @@
#include <stdlib.h>
#include <cmath>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
#include "src/assembler.h"
@@ -1809,9 +1807,15 @@ void Simulator::WriteB(int32_t addr, int8_t value) {
// Returns the limit of the stack area to enable checking for stack overflows.
-uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
- // pushing values.
+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
+ // The simulator uses a separate JS stack. If we have exhausted the C stack,
+ // we also drop down the JS limit to reflect the exhaustion on the JS stack.
+ if (GetCurrentStackPosition() < c_limit) {
+ return reinterpret_cast<uintptr_t>(get_sp());
+ }
+
+ // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
+ // to prevent overrunning the stack when pushing values.
return reinterpret_cast<uintptr_t>(stack_) + 1024;
}
@@ -4466,6 +4470,9 @@ void Simulator::Execute() {
void Simulator::CallInternal(byte* entry) {
+ // Adjust JS-based stack limit to C-based stack limit.
+ isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
// Prepare to execute the code at entry.
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 00b79b3cfe..6de5163dda 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -193,12 +193,12 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
- Address get_sp() {
+ Address get_sp() const {
return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
}
// Accessor to the internal simulator stack area.
- uintptr_t StackLimit() const;
+ uintptr_t StackLimit(uintptr_t c_limit) const;
// Executes MIPS instructions until the PC reaches end_sim_pc.
void Execute();
@@ -378,6 +378,7 @@ class Simulator {
instr->OpcodeValue());
}
InstructionDecode(instr);
+ SNPrintF(trace_buf_, " ");
}
// ICache.
@@ -465,15 +466,14 @@ class Simulator {
// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. Setting the c_limit to indicate a very small
-// stack cause stack overflow errors, since the simulator ignores the input.
-// This is unlikely to be an issue in practice, though it might cause testing
-// trouble down the line.
+// the C-based native code. The JS-based limit normally points near the end of
+// the simulator stack. When the C-based limit is exhausted we reflect that by
+// lowering the JS-based limit as well, to make stack checks trigger.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit();
+ return Simulator::current(isolate)->StackLimit(c_limit);
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index bfeb3002eb..16ca33a9f3 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -40,7 +40,7 @@
#include "src/mips64/assembler-mips64.h"
#include "src/assembler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
namespace v8 {
@@ -117,7 +117,7 @@ int FPURegister::ToAllocationIndex(FPURegister reg) {
// -----------------------------------------------------------------------------
// RelocInfo.
-void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
byte* p = reinterpret_cast<byte*>(pc_);
@@ -189,11 +189,6 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
-Address Assembler::break_address_from_return_address(Address pc) {
- return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
-}
-
-
void Assembler::set_target_internal_reference_encoded_at(Address pc,
Address target) {
// Encoded internal references are j/jal instructions.
@@ -349,22 +344,18 @@ void RelocInfo::set_code_age_stub(Code* stub,
}
-Address RelocInfo::call_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- // The pc_ offset of 0 assumes mips patched return sequence per
- // debug-mips.cc BreakLocation::SetDebugBreakAtReturn(), or
- // debug break slot per BreakLocation::SetDebugBreakAtSlot().
+Address RelocInfo::debug_call_address() {
+ // The pc_ offset of 0 assumes patched debug break slot or return
+ // sequence.
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
return Assembler::target_address_at(pc_, host_);
}
-void RelocInfo::set_call_address(Address target) {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- // The pc_ offset of 0 assumes mips patched return sequence per
- // debug-mips.cc BreakLocation::SetDebugBreakAtReturn(), or
- // debug break slot per BreakLocation::SetDebugBreakAtSlot().
+void RelocInfo::set_debug_call_address(Address target) {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ // The pc_ offset of 0 assumes patched debug break slot or return
+ // sequence.
Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -374,23 +365,6 @@ void RelocInfo::set_call_address(Address target) {
}
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-Object** RelocInfo::call_object_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 6 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -442,11 +416,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- isolate->debug()->has_break_points()) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
@@ -470,11 +441,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index ea497509c6..98dd71122a 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -32,8 +32,6 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/base/cpu.h"
@@ -681,11 +679,9 @@ int Assembler::target_at(int pos, bool is_internal) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
} else {
- uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
- instr_address &= kImm28Mask;
- int delta = static_cast<int>(instr_address - imm28);
- DCHECK(pos > delta);
- return pos - delta;
+ // Sign extend 28-bit offset.
+ int32_t delta = static_cast<int32_t>((imm28 << 4) >> 4);
+ return pos + delta;
}
}
}
@@ -706,7 +702,6 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
return;
}
- DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr));
if (IsBranch(instr)) {
int32_t imm18 = target_pos - (pos + kBranchPCOffset);
DCHECK((imm18 & 3) == 0);
@@ -736,16 +731,25 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr_ori | ((imm >> 16) & kImm16Mask));
instr_at_put(pos + 3 * Assembler::kInstrSize,
instr_ori2 | (imm & kImm16Mask));
- } else {
- DCHECK(IsJ(instr) || IsJal(instr));
- uint64_t imm28 = reinterpret_cast<uint64_t>(buffer_) + target_pos;
- imm28 &= kImm28Mask;
+ } else if (IsJ(instr) || IsJal(instr)) {
+ int32_t imm28 = target_pos - pos;
DCHECK((imm28 & 3) == 0);
- instr &= ~kImm26Mask;
uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
DCHECK(is_uint26(imm26));
+ // Place 26-bit signed offset with markings.
+ // When code is committed it will be resolved to j/jal.
+ int32_t mark = IsJ(instr) ? kJRawMark : kJalRawMark;
+ instr_at_put(pos, mark | (imm26 & kImm26Mask));
+ } else {
+ int32_t imm28 = target_pos - pos;
+ DCHECK((imm28 & 3) == 0);
+ uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
+ DCHECK(is_uint26(imm26));
+ // Place raw 26-bit signed offset.
+ // When code is committed it will be resolved to j/jal.
+ instr &= ~kImm26Mask;
instr_at_put(pos, instr | (imm26 & kImm26Mask));
}
}
@@ -1027,6 +1031,26 @@ uint64_t Assembler::jump_address(Label* L) {
}
+uint64_t Assembler::jump_offset(Label* L) {
+ int64_t target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ return kEndOfJumpChain;
+ }
+ }
+ int64_t imm = target_pos - pc_offset();
+ DCHECK((imm & 3) == 0);
+
+ return static_cast<uint64_t>(imm);
+}
+
+
int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t target_pos;
if (L->is_bound()) {
@@ -1405,19 +1429,32 @@ void Assembler::bnezc(Register rs, int32_t offset) {
void Assembler::j(int64_t target) {
-#if DEBUG
- // Get pc of delay slot.
- if (target != kEndOfJumpChain) {
- uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
- (kImm26Bits + kImmFieldShift)) == 0;
- DCHECK(in_range && ((target & 3) == 0));
- }
-#endif
GenInstrJump(J, static_cast<uint32_t>(target >> 2) & kImm26Mask);
}
+void Assembler::j(Label* target) {
+ uint64_t imm = jump_offset(target);
+ if (target->is_bound()) {
+ GenInstrJump(static_cast<Opcode>(kJRawMark),
+ static_cast<uint32_t>(imm >> 2) & kImm26Mask);
+ } else {
+ j(imm);
+ }
+}
+
+
+void Assembler::jal(Label* target) {
+ uint64_t imm = jump_offset(target);
+ if (target->is_bound()) {
+ GenInstrJump(static_cast<Opcode>(kJalRawMark),
+ static_cast<uint32_t>(imm >> 2) & kImm26Mask);
+ } else {
+ jal(imm);
+ }
+}
+
+
void Assembler::jr(Register rs) {
if (kArchVariant != kMips64r6) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1433,15 +1470,6 @@ void Assembler::jr(Register rs) {
void Assembler::jal(int64_t target) {
-#ifdef DEBUG
- // Get pc of delay slot.
- if (target != kEndOfJumpChain) {
- uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
- (kImm26Bits + kImmFieldShift)) == 0;
- DCHECK(in_range && ((target & 3) == 0));
- }
-#endif
positions_recorder()->WriteRecordedPositions();
GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
}
@@ -2920,7 +2948,6 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
}
Instr instr = instr_at(pc);
DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
- DCHECK(IsJ(instr) || IsLui(instr) || IsJal(instr));
if (IsLui(instr)) {
Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
@@ -2951,22 +2978,30 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
instr_at_put(pc + 3 * Assembler::kInstrSize,
instr_ori2 | (imm & kImm16Mask));
return 4; // Number of instructions patched.
- } else {
+ } else if (IsJ(instr) || IsJal(instr)) {
+ // Regular j/jal relocation.
uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
- if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
- return 0; // Number of instructions patched.
- }
-
imm28 += pc_delta;
imm28 &= kImm28Mask;
- DCHECK((imm28 & 3) == 0);
-
instr &= ~kImm26Mask;
- uint32_t imm26 = imm28 >> 2;
- DCHECK(is_uint26(imm26));
-
+ DCHECK((imm28 & 3) == 0);
+ uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
instr_at_put(pc, instr | (imm26 & kImm26Mask));
return 1; // Number of instructions patched.
+ } else {
+ // Unbox raw offset and emit j/jal.
+ int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ // Sign extend 28-bit offset to 32-bit.
+ imm28 = (imm28 << 4) >> 4;
+ uint64_t target =
+ static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
+ target &= kImm28Mask;
+ DCHECK((imm28 & 3) == 0);
+ uint32_t imm26 = static_cast<uint32_t>(target >> 2);
+ // Check markings whether to emit j or jal.
+ uint32_t unbox = (instr & kJRawMark) ? J : JAL;
+ instr_at_put(pc, unbox | (imm26 & kImm26Mask));
+ return 1; // Number of instructions patched.
}
}
@@ -3009,8 +3044,7 @@ void Assembler::GrowBuffer() {
// Relocate runtime entries.
for (RelocIterator it(desc); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
- rmode == RelocInfo::INTERNAL_REFERENCE) {
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
RelocateInternalReference(rmode, p, pc_delta);
}
@@ -3066,10 +3100,10 @@ void Assembler::emit_code_stub_address(Code* stub) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(pc_, rmode, data, NULL);
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
+ if (rmode >= RelocInfo::COMMENT &&
+ rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsPosition(rmode));
// These modes do not need an entry in the constant pool.
@@ -3130,14 +3164,12 @@ void Assembler::CheckTrampolinePool() {
int pool_start = pc_offset();
for (int i = 0; i < unbound_labels_count_; i++) {
- uint64_t imm64;
- imm64 = jump_address(&after_pool);
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
// references until associated instructions are emitted and available
// to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- j(imm64);
+ j(&after_pool);
}
nop();
}
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 314970238b..0164072333 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -75,7 +75,7 @@ namespace internal {
// Core register.
struct Register {
static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kMaxNumAllocatableRegisters = 14; // v0 through t6 and cp.
+ static const int kMaxNumAllocatableRegisters = 14; // v0 through t2 and cp.
static const int kSizeInBytes = 8;
static const int kCpRegister = 23; // cp (s7) is the 23rd register.
@@ -481,6 +481,7 @@ class Assembler : public AssemblerBase {
return o >> 2;
}
uint64_t jump_address(Label* L);
+ uint64_t jump_offset(Label* L);
// Puts a labels target address at the given position.
// The high 8 bits are set to zero.
@@ -518,9 +519,6 @@ class Assembler : public AssemblerBase {
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
- // Return the code target address of the patch debug break slot
- inline static Address break_address_from_return_address(Address pc);
-
static void JumpLabelToJumpRegister(Address pc);
static void QuietNaN(HeapObject* nan);
@@ -566,25 +564,14 @@ class Assembler : public AssemblerBase {
// target and the return address.
static const int kCallTargetAddressOffset = 6 * kInstrSize;
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = 0;
-
// Distance between start of patched debug break slot and the emitted address
// to jump to.
- static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
+ static const int kPatchDebugBreakSlotAddressOffset = 6 * kInstrSize;
// Difference between address of current opcode and value read from pc
// register.
static const int kPcLoadDelta = 4;
- static const int kPatchDebugBreakSlotReturnOffset = 6 * kInstrSize;
-
- // Number of instructions used for the JS return sequence. The constant is
- // used by the debugger to patch the JS return sequence.
- static const int kJSReturnSequenceInstructions = 7;
- static const int kJSReturnSequenceLength =
- kJSReturnSequenceInstructions * kInstrSize;
static const int kDebugBreakSlotInstructions = 6;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
@@ -747,6 +734,8 @@ class Assembler : public AssemblerBase {
// Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits.
void j(int64_t target);
void jal(int64_t target);
+ void j(Label* target);
+ void jal(Label* target);
void jalr(Register rs, Register rd = ra);
void jr(Register target);
void jic(Register rt, int16_t offset);
@@ -1100,11 +1089,11 @@ class Assembler : public AssemblerBase {
// Debugging.
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
+ // Mark generator continuation.
+ void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot();
+ void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
diff --git a/deps/v8/src/mips64/builtins-mips64.cc b/deps/v8/src/mips64/builtins-mips64.cc
index ca916374a8..5754117140 100644
--- a/deps/v8/src/mips64/builtins-mips64.cc
+++ b/deps/v8/src/mips64/builtins-mips64.cc
@@ -2,16 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/codegen.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -315,39 +311,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
-static void Generate_Runtime_NewObject(MacroAssembler* masm,
- bool create_memento,
- Register original_constructor,
- Label* count_incremented,
- Label* allocated) {
- if (create_memento) {
- // Get the cell or allocation site.
- __ ld(a2, MemOperand(sp, 2 * kPointerSize));
- __ push(a2);
- }
-
- __ push(a1); // argument for Runtime_NewObject
- __ push(original_constructor); // original constructor
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ CallRuntime(Runtime::kNewObject, 2);
- }
- __ mov(t0, v0);
-
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- if (create_memento) {
- __ jmp(count_incremented);
- } else {
- __ jmp(allocated);
- }
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -363,53 +328,33 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Isolate* isolate = masm->isolate();
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
- if (create_memento) {
- __ AssertUndefinedOrAllocationSite(a2, t0);
- __ push(a2);
- }
-
// Preserve the incoming parameters on the stack.
+ __ AssertUndefinedOrAllocationSite(a2, t0);
__ SmiTag(a0);
- if (use_new_target) {
- __ Push(a0, a1, a3);
- } else {
- __ Push(a0, a1);
- }
-
- Label rt_call, allocated, normal_new, count_incremented;
- __ Branch(&normal_new, eq, a1, Operand(a3));
-
- // Original constructor and function are different.
- Generate_Runtime_NewObject(masm, create_memento, a3, &count_incremented,
- &allocated);
- __ bind(&normal_new);
+ __ Push(a2, a0, a1, a3);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
if (FLAG_inline_new) {
- Label undo_allocation;
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate);
__ li(a2, Operand(debug_step_in_fp));
__ ld(a2, MemOperand(a2));
__ Branch(&rt_call, ne, a2, Operand(zero_reg));
+ // Fall back to runtime if the original constructor and function differ.
+ __ Branch(&rt_call, ne, a1, Operand(a3));
+
// Load the initial map and verify that it is in fact a map.
// a1: constructor function
__ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(a2, &rt_call);
- __ GetObjectType(a2, a3, t0);
+ __ GetObjectType(a2, t1, t0);
__ Branch(&rt_call, ne, t0, Operand(MAP_TYPE));
// Check that the constructor is not constructing a JSFunction (see
@@ -417,8 +362,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map's instance type would be JS_FUNCTION_TYPE.
// a1: constructor function
// a2: initial map
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+ __ lbu(t1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(&rt_call, eq, t1, Operand(JS_FUNCTION_TYPE));
if (!is_api_function) {
Label allocate;
@@ -446,12 +391,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Now allocate the JSObject on the heap.
// a1: constructor function
// a2: initial map
+ Label rt_call_reload_new_target;
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
if (create_memento) {
__ Daddu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize));
}
- __ Allocate(a3, t0, t1, t2, &rt_call, SIZE_IN_WORDS);
+ __ Allocate(a3, t0, t1, t2, &rt_call_reload_new_target, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
@@ -489,9 +435,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Operand(static_cast<int64_t>(Map::kSlackTrackingCounterEnd)));
// Allocate object with a slack.
- __ lwu(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
- __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
+ __ lbu(
+ a0,
+ FieldMemOperand(
+ a2, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
+ __ lbu(a2, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ __ dsubu(a0, a0, a2);
__ dsll(at, a0, kPointerSizeLog2);
__ daddu(a0, t1, at);
// a0: offset of first field after pre-allocated fields
@@ -522,7 +471,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ sd(t3, MemOperand(t1));
__ Daddu(t1, t1, kPointerSize);
// Load the AllocationSite.
- __ ld(t3, MemOperand(sp, 2 * kPointerSize));
+ __ ld(t3, MemOperand(sp, 3 * kPointerSize));
+ __ AssertUndefinedOrAllocationSite(t3, a0);
DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ sd(t3, MemOperand(t1));
__ Daddu(t1, t1, kPointerSize);
@@ -533,115 +483,49 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
+ // and jump into the continuation code at any time from now on.
__ Daddu(t0, t0, Operand(kHeapObjectTag));
- // Check if a non-empty properties array is needed. Continue with
- // allocated object if not; allocate and initialize a FixedArray if yes.
- // a1: constructor function
- // t0: JSObject
- // t1: start of next object (not tagged)
- __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields
- // and in-object properties.
- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
- __ Ext(t2, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ Daddu(a3, a3, Operand(t2));
- __ Ext(t2, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
- kBitsPerByte);
- __ dsubu(a3, a3, t2);
-
- // Done if no extra properties are to be allocated.
- __ Branch(&allocated, eq, a3, Operand(zero_reg));
- __ Assert(greater_equal, kPropertyAllocationCountFailed,
- a3, Operand(zero_reg));
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // a1: constructor
- // a3: number of elements in properties array
- // t0: JSObject
- // t1: start of next object
- __ Daddu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ Allocate(
- a0,
- t1,
- t2,
- a2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // a1: constructor
- // a3: number of elements in properties array (untagged)
- // t0: JSObject
- // t1: start of FixedArray (untagged)
- __ LoadRoot(t2, Heap::kFixedArrayMapRootIndex);
- __ mov(a2, t1);
- __ sd(t2, MemOperand(a2, JSObject::kMapOffset));
- // Tag number of elements.
- __ dsll32(a0, a3, 0);
- __ sd(a0, MemOperand(a2, FixedArray::kLengthOffset));
- __ Daddu(a2, a2, Operand(2 * kPointerSize));
-
- DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
- DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-
- // Initialize the fields to undefined.
- // a1: constructor
- // a2: First element of FixedArray (not tagged)
- // a3: number of elements in properties array
- // t0: JSObject
- // t1: FixedArray (not tagged)
- __ dsll(a7, a3, kPointerSizeLog2);
- __ daddu(t2, a2, a7); // End of object.
- DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- if (!is_api_function || create_memento) {
- __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
- __ Assert(eq, kUndefinedValueNotLoaded, t3, Operand(a6));
- }
- __ InitializeFieldsWithFiller(a2, t2, t3);
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject.
- // a1: constructor function
- // t0: JSObject
- // t1: FixedArray (not tagged)
- __ Daddu(t1, t1, Operand(kHeapObjectTag)); // Add the heap tag.
- __ sd(t1, FieldMemOperand(t0, JSObject::kPropertiesOffset));
-
// Continue with JSObject being successfully allocated.
- // a1: constructor function
// a4: JSObject
__ jmp(&allocated);
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // t0: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(t0, t1);
+ // Reload the original constructor and fall-through.
+ __ bind(&rt_call_reload_new_target);
+ __ ld(a3, MemOperand(sp, 0 * kPointerSize));
}
// Allocate the new receiver object using the runtime call.
// a1: constructor function
+ // a3: original constructor
__ bind(&rt_call);
- Generate_Runtime_NewObject(masm, create_memento, a1, &count_incremented,
- &allocated);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ ld(a2, MemOperand(sp, 3 * kPointerSize));
+ __ push(a2); // argument 1: allocation site
+ }
+ __ Push(a1, a3); // arguments 2-3 / 1-2
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ mov(t0, v0);
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
// Receiver for constructor call allocated.
// t0: JSObject
__ bind(&allocated);
if (create_memento) {
- int offset = (use_new_target ? 3 : 2) * kPointerSize;
- __ ld(a2, MemOperand(sp, offset));
+ __ ld(a2, MemOperand(sp, 3 * kPointerSize));
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
__ Branch(&count_incremented, eq, a2, Operand(t1));
// a2 is an AllocationSite. We are creating a memento from it, so we
@@ -655,19 +539,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore the parameters.
- if (use_new_target) {
- __ Pop(a3); // new.target
- }
+ __ Pop(a3); // new.target
__ Pop(a1);
__ ld(a0, MemOperand(sp));
__ SmiUntag(a0);
- if (use_new_target) {
- __ Push(a3, t0, t0);
- } else {
- __ Push(t0, t0);
- }
+ __ Push(a3, t0, t0);
// Set up pointer to last argument.
__ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -679,8 +557,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a3: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target (if used)
- // sp[2/3]: number of arguments (smi-tagged)
+ // sp[2]: new.target
+ // sp[3]: number of arguments (smi-tagged)
Label loop, entry;
__ mov(a3, a0);
__ jmp(&entry);
@@ -707,9 +585,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- // TODO(arv): Remove the "!use_new_target" before supporting optimization
- // of functions that reference new.target
- if (!is_api_function && !use_new_target) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -724,8 +600,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// v0: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (if used)
- // sp[1/2]: number of arguments (smi-tagged)
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
__ JumpIfSmi(v0, &use_receiver);
// If the type of the result (stored in its map) is less than
@@ -743,10 +619,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&exit);
// v0: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (if used)
- // sp[1/2]: number of arguments (smi-tagged)
- int offset = (use_new_target ? 2 : 1) * kPointerSize;
- __ ld(a1, MemOperand(sp, offset));
+ // sp[1]: new.target (original constructor)
+ // sp[2]: number of arguments (smi-tagged)
+ __ ld(a1, MemOperand(sp, 2 * kPointerSize));
// Leave construct frame.
}
@@ -760,17 +635,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, true, false);
}
@@ -784,12 +654,12 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- // TODO(dslomov): support pretenuring
- CHECK(!FLAG_pretenuring_call_new);
-
{
FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+ __ AssertUndefinedOrAllocationSite(a2, t0);
+ __ push(a2);
+
__ mov(a4, a0);
__ SmiTag(a4);
__ push(a4); // Smi-tagged arguments count.
@@ -987,6 +857,148 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o a1: the JS function object being called.
+// o cp: our context
+// o fp: the caller's frame pointer
+// o sp: stack pointer
+// o ra: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-mips.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+
+ __ Push(ra, fp, cp, a1);
+ __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+ // Get the bytecode array from the function object and load the pointer to the
+ // first entry into kInterpreterBytecodeRegister.
+ __ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister, a4);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
+ Operand(zero_reg));
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, a4, a4);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
+ Operand(BYTECODE_ARRAY_TYPE));
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size (word) from the BytecodeArray object.
+ __ lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ Dsubu(a5, sp, Operand(a4));
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ __ Branch(&ok, hs, a5, Operand(a2));
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ Label loop_header;
+ Label loop_check;
+ __ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ push(a5);
+ // Continue loop if not done.
+ __ bind(&loop_check);
+ __ Dsubu(a4, a4, Operand(kPointerSize));
+ __ Branch(&loop_header, ge, a4, Operand(zero_reg));
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(at));
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ bind(&ok);
+ }
+
+ // Load bytecode offset and dispatch table into registers.
+ __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ Dsubu(
+ kInterpreterRegisterFileRegister, fp,
+ Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ Daddu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Dispatch to the first bytecode handler for the function.
+ __ Daddu(a0, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ lbu(a0, MemOperand(a0));
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ Daddu(at, kInterpreterDispatchTableRegister, at);
+ __ ld(at, MemOperand(at));
+ // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
+ // and header removal.
+ __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(at);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // The return value is in accumulator, which is already in v0.
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Drop receiver + arguments.
+ __ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Jump(ra);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -1300,8 +1312,10 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(a0);
- __ Push(a0, a2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Push(a0);
+ __ mov(a0, a2);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(a2, v0);
__ pop(a0);
@@ -1414,6 +1428,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int vectorOffset,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
@@ -1431,12 +1446,9 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ ld(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
- FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
- Handle<TypeFeedbackVector> feedback_vector =
- masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
- int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
- __ li(slot, Operand(Smi::FromInt(index)));
- __ li(vector, feedback_vector);
+ int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
+ __ li(slot, Operand(Smi::FromInt(slot_index)));
+ __ ld(vector, MemOperand(fp, vectorOffset));
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1470,6 +1482,13 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
const int kReceiverOffset = kArgumentsOffset + kPointerSize;
const int kFunctionOffset = kReceiverOffset + kPointerSize;
+ const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(a1);
__ ld(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
__ push(a0);
@@ -1487,10 +1506,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
Generate_CheckStackOverflow(masm, kFunctionOffset, v0, kArgcIsSmiTagged);
// Push current limit and index.
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ mov(a1, zero_reg);
__ Push(v0, a1); // Limit and initial index.
@@ -1536,8 +1553,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
// Convert the receiver to a regular object.
// a0: receiver
__ bind(&call_to_object);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
__ Branch(&push_receiver);
@@ -1551,8 +1568,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ push(a0);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
@@ -1592,6 +1609,13 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+ const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(a1);
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
@@ -1616,33 +1640,28 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
Generate_CheckStackOverflow(masm, kFunctionOffset, v0, kArgcIsSmiTagged);
// Push current limit and index.
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ push(v0); // limit
__ mov(a1, zero_reg); // initial index
__ push(a1);
- // Push newTarget and callee functions
- __ ld(a0, MemOperand(fp, kNewTargetOffset));
- __ push(a0);
+ // Push the constructor function as callee.
__ ld(a0, MemOperand(fp, kFunctionOffset));
__ push(a0);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Use undefined feedback vector
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ ld(a1, MemOperand(fp, kFunctionOffset));
+ __ ld(a4, MemOperand(fp, kNewTargetOffset));
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
// Leave internal frame.
}
__ jr(ra);
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 90a72ada64..191b9607f8 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/bootstrapper.h"
@@ -13,8 +11,8 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -32,7 +30,7 @@ static void InitializeArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -48,7 +46,7 @@ static void InitializeInternalArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -291,6 +289,8 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
@@ -302,16 +302,18 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
if (cc != eq) {
- __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
- // Call runtime on identical symbols since we need to throw a TypeError.
- __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics,
- // since we need to throw a TypeError. Smis and heap numbers have
- // already been ruled out.
- __ And(t0, t0, Operand(kIsNotStringMask));
- __ Branch(slow, ne, t0, Operand(zero_reg));
- }
+ __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Call runtime on identical symbols since we need to throw a TypeError.
+ __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
+ if (is_strong(strength)) {
+ // Call the runtime on anything that is converted in the semantics,
+ // since we need to throw a TypeError. Smis and heap numbers have
+ // already been ruled out.
+ __ And(t0, t0, Operand(kIsNotStringMask));
+ __ Branch(slow, ne, t0, Operand(zero_reg));
+ }
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -719,26 +721,30 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// a1 (rhs) second.
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- if (cc == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq && strict()) {
+ __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result.
- if (cc == lt || cc == le) {
- ncr = GREATER;
+ Builtins::JavaScript native;
+ if (cc == eq) {
+ native = Builtins::EQUALS;
} else {
- DCHECK(cc == gt || cc == ge); // Remaining cases.
- ncr = LESS;
+ native =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
+ int ncr; // NaN compare result.
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
+ } else {
+ DCHECK(cc == gt || cc == ge); // Remaining cases.
+ ncr = LESS;
+ }
+ __ li(a0, Operand(Smi::FromInt(ncr)));
+ __ push(a0);
}
- __ li(a0, Operand(Smi::FromInt(ncr)));
- __ push(a0);
- }
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -1673,7 +1679,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(a1);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments, 1, 1);
}
@@ -1938,10 +1944,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -2545,16 +2548,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
+ bool is_super) {
// a0 : number of arguments to the construct function
- // a2 : Feedback vector
+ // a2 : feedback vector
// a3 : slot in feedback vector (Smi)
// a1 : the function to call
+ // a4 : original constructor (for IsSuperConstructorCall)
FrameScope scope(masm, StackFrame::INTERNAL);
- const RegList kSavedRegs = 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7; // a3
+ const RegList kSavedRegs = 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7 | // a3
+ BoolToInt(is_super) << 8; // a4
+
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(a0);
@@ -2567,14 +2574,15 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
- // a2 : Feedback vector
+ // a2 : feedback vector
// a3 : slot in feedback vector (Smi)
+ // a4 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2582,23 +2590,23 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into a4.
- __ dsrl(a4, a3, 32 - kPointerSizeLog2);
- __ Daddu(a4, a2, Operand(a4));
- __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
+ // Load the cache state into a5.
+ __ dsrl(a5, a3, 32 - kPointerSizeLog2);
+ __ Daddu(a5, a2, Operand(a5));
+ __ ld(a5, FieldMemOperand(a5, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- // We don't know if a4 is a WeakCell or a Symbol, but it's harmless to read at
+ // We don't know if a5 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in type-feedback-vector.h).
Label check_allocation_site;
- Register feedback_map = a5;
+ Register feedback_map = a6;
Register weak_value = t0;
- __ ld(weak_value, FieldMemOperand(a4, WeakCell::kValueOffset));
+ __ ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
__ Branch(&done, eq, a1, Operand(weak_value));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&done, eq, a4, Operand(at));
- __ ld(feedback_map, FieldMemOperand(a4, HeapObject::kMapOffset));
+ __ Branch(&done, eq, a5, Operand(at));
+ __ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kWeakCellMapRootIndex);
__ Branch(FLAG_pretenuring_call_new ? &miss : &check_allocation_site, ne,
feedback_map, Operand(at));
@@ -2617,8 +2625,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ Branch(&miss, ne, feedback_map, Operand(at));
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
- __ Branch(&megamorphic, ne, a1, Operand(a4));
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
+ __ Branch(&megamorphic, ne, a1, Operand(a5));
__ jmp(&done);
}
@@ -2627,35 +2635,35 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
- __ Branch(&initialize, eq, a4, Operand(at));
+ __ Branch(&initialize, eq, a5, Operand(at));
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ dsrl(a4, a3, 32 - kPointerSizeLog2);
- __ Daddu(a4, a2, Operand(a4));
+ __ dsrl(a5, a3, 32 - kPointerSizeLog2);
+ __ Daddu(a5, a2, Operand(a5));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
+ __ sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize));
__ jmp(&done);
// An uninitialized cache is patched with the function.
__ bind(&initialize);
if (!FLAG_pretenuring_call_new) {
// Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
- __ Branch(&not_array_function, ne, a1, Operand(a4));
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
+ __ Branch(&not_array_function, ne, a1, Operand(a5));
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ Branch(&done);
__ bind(&not_array_function);
}
CreateWeakCellStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ bind(&done);
}
@@ -2708,8 +2716,10 @@ static void EmitSlowCase(MacroAssembler* masm,
static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{ FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a3);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Push(a1);
+ __ mov(a0, a3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ pop(a1);
}
__ Branch(USE_DELAY_SLOT, cont);
@@ -2780,16 +2790,17 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
- // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
+ // a3 : slot in feedback vector (Smi, for RecordCallTarget)
+ // a4 : original constructor (for IsSuperConstructorCall)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(a1, &non_function_call);
// Check that the function is a JSFunction.
- __ GetObjectType(a1, a4, a4);
- __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
+ __ GetObjectType(a1, a5, a5);
+ __ Branch(&slow, ne, a5, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ dsrl(at, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, at);
@@ -2814,11 +2825,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// Pass function as original constructor.
if (IsSuperConstructorCall()) {
- __ li(a4, Operand(1 * kPointerSize));
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(a4, a4, at);
- __ daddu(at, sp, a4);
- __ ld(a3, MemOperand(at, 0));
+ __ mov(a3, a4);
} else {
__ mov(a3, a1);
}
@@ -2833,10 +2840,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a0: number of arguments
// a1: called object
- // a4: object type
+ // a5: object type
Label do_call;
__ bind(&slow);
- __ Branch(&non_function_call, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Branch(&non_function_call, ne, a5, Operand(JS_FUNCTION_PROXY_TYPE));
__ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
@@ -3111,11 +3118,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(a1, a2, a3);
// Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
-
- ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
- __ CallExternalReference(miss, 3);
+ Runtime::FunctionId id = GetICState() == DEFAULT
+ ? Runtime::kCallIC_Miss //
+ : Runtime::kCallIC_Customization_Miss;
+ __ CallRuntime(id, 3);
// Move result to a1 and exit the internal frame.
__ mov(a1, v0);
@@ -3193,27 +3199,17 @@ void StringCharCodeAtGenerator::GenerateSlow(
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
-
- DCHECK(!a4.is(result_));
- DCHECK(!a4.is(code_));
-
- STATIC_ASSERT(kSmiTag == 0);
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
- __ And(a4,
- code_,
- Operand(kSmiTagMask |
- ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
- __ Branch(&slow_case_, ne, a4, Operand(zero_reg));
-
+ __ JumpIfNotSmi(code_, &slow_case_);
+ __ Branch(&slow_case_, hi, code_,
+ Operand(Smi::FromInt(String::kMaxOneByteCharCode)));
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged one_byte char code.
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiScale(a4, code_, kPointerSizeLog2);
- __ Daddu(result_, result_, a4);
+ __ SmiScale(at, code_, kPointerSizeLog2);
+ __ Daddu(result_, result_, at);
__ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
- __ Branch(&slow_case_, eq, result_, Operand(a4));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&slow_case_, eq, result_, Operand(at));
__ bind(&exit_);
}
@@ -3301,11 +3297,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ ld(a2, MemOperand(sp, kToOffset));
__ ld(a3, MemOperand(sp, kFromOffset));
-// Does not needed?
-// STATIC_ASSERT(kFromOffset == kToOffset + 4);
+
STATIC_ASSERT(kSmiTag == 0);
-// Does not needed?
-// STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
// Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
// safe in this case.
@@ -3491,13 +3484,14 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
__ bind(&single_char);
// v0: original string
// a1: instance type
// a2: length
// a3: from index (untagged)
+ __ SmiTag(a3);
StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
generator.GenerateFast(masm);
@@ -3684,7 +3678,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, a4, a5);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -3994,7 +3988,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ bind(&miss);
@@ -4044,14 +4038,13 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a0);
__ Push(ra, a1, a0);
__ li(a4, Operand(Smi::FromInt(op())));
__ daddiu(sp, sp, -kPointerSize);
- __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
+ __ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
+ USE_DELAY_SLOT);
__ sd(a4, MemOperand(sp)); // In the delay slot.
// Compute the entry point of the rewritten stub.
__ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4127,7 +4120,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
- DCHECK_EQ(kSmiTagSize, 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
Register tmp = properties;
__ dsll(scratch0, index, kPointerSizeLog2);
@@ -4217,8 +4210,8 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ dsrl(scratch2, scratch2, Name::kHashShift);
__ And(scratch2, scratch1, scratch2);
- // Scale the index by multiplying by the element size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ // Scale the index by multiplying by the entry size.
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ dsll(at, scratch2, 1);
@@ -4305,14 +4298,14 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ And(index, mask, index);
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// index *= 3.
__ mov(at, index);
__ dsll(index, index, 1);
__ Daddu(index, index, at);
- DCHECK_EQ(kSmiTagSize, 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
__ dsll(index, index, kPointerSizeLog2);
__ Daddu(index, index, dictionary);
__ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
@@ -4792,7 +4785,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- false, receiver, name, feedback,
+ receiver, name, feedback,
receiver_map, scratch1, a7);
__ bind(&miss);
@@ -5035,12 +5028,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// sp[0] - last argument
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
- DCHECK(FAST_SMI_ELEMENTS == 0);
- DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
- DCHECK(FAST_ELEMENTS == 2);
- DCHECK(FAST_HOLEY_ELEMENTS == 3);
- DCHECK(FAST_DOUBLE_ELEMENTS == 4);
- DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
__ And(at, a3, Operand(1));
@@ -5312,6 +5305,152 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context_reg = cp;
+ Register slot_reg = a2;
+ Register result_reg = v0;
+ Label slow_case;
+
+ // Go up context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ ld(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ context_reg = result_reg;
+ }
+
+ // Load the PropertyCell value at the specified slot.
+ __ dsll(at, slot_reg, kPointerSizeLog2);
+ __ Daddu(at, at, Operand(context_reg));
+ __ ld(result_reg, ContextOperand(at, 0));
+ __ ld(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
+
+ // Check that value is not the_hole.
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&slow_case, eq, result_reg, Operand(at));
+ __ Ret();
+
+ // Fallback to the runtime.
+ __ bind(&slow_case);
+ __ SmiTag(slot_reg);
+ __ Push(slot_reg);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+}
+
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context_reg = cp;
+ Register slot_reg = a2;
+ Register value_reg = a0;
+ Register cell_reg = a4;
+ Register cell_value_reg = a5;
+ Register cell_details_reg = a6;
+ Label fast_heapobject_case, fast_smi_case, slow_case;
+
+ if (FLAG_debug_code) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
+ }
+
+ // Go up context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ ld(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ context_reg = cell_reg;
+ }
+
+ // Load the PropertyCell at the specified slot.
+ __ dsll(at, slot_reg, kPointerSizeLog2);
+ __ Daddu(at, at, Operand(context_reg));
+ __ ld(cell_reg, ContextOperand(at, 0));
+
+ // Load PropertyDetails for the cell (actually only the cell_type and kind).
+ __ ld(cell_details_reg,
+ FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
+ __ SmiUntag(cell_details_reg);
+ __ And(cell_details_reg, cell_details_reg,
+ PropertyDetails::PropertyCellTypeField::kMask |
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask);
+
+ // Check if PropertyCell holds mutable data.
+ Label not_mutable_data;
+ __ Branch(&not_mutable_data, ne, cell_details_reg,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kMutable) |
+ PropertyDetails::KindField::encode(kData)));
+ __ JumpIfSmi(value_reg, &fast_smi_case);
+ __ bind(&fast_heapobject_case);
+ __ sd(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
+ __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
+ cell_details_reg, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ // RecordWriteField clobbers the value register, so we need to reload.
+ __ Ret(USE_DELAY_SLOT);
+ __ ld(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
+ __ bind(&not_mutable_data);
+
+ // Check if PropertyCell value matches the new value (relevant for Constant,
+ // ConstantType and Undefined cells).
+ Label not_same_value;
+ __ ld(cell_value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
+ __ Branch(&not_same_value, ne, value_reg, Operand(cell_value_reg));
+ // Make sure the PropertyCell is not marked READ_ONLY.
+ __ And(at, cell_details_reg, PropertyDetails::kAttributesReadOnlyMask);
+ __ Branch(&slow_case, ne, at, Operand(zero_reg));
+ if (FLAG_debug_code) {
+ Label done;
+ // This can only be true for Constant, ConstantType and Undefined cells,
+ // because we never store the_hole via this stub.
+ __ Branch(&done, eq, cell_details_reg,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstant) |
+ PropertyDetails::KindField::encode(kData)));
+ __ Branch(&done, eq, cell_details_reg,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ Check(eq, kUnexpectedValue, cell_details_reg,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kUndefined) |
+ PropertyDetails::KindField::encode(kData)));
+ __ bind(&done);
+ }
+ __ Ret();
+ __ bind(&not_same_value);
+
+ // Check if PropertyCell contains data with constant type (and is not
+ // READ_ONLY).
+ __ Branch(&slow_case, ne, cell_details_reg,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+
+ // Now either both old and new values must be SMIs or both must be heap
+ // objects with same map.
+ Label value_is_heap_object;
+ __ JumpIfNotSmi(value_reg, &value_is_heap_object);
+ __ JumpIfNotSmi(cell_value_reg, &slow_case);
+ // Old and new values are SMIs, no need for a write barrier here.
+ __ bind(&fast_smi_case);
+ __ Ret(USE_DELAY_SLOT);
+ __ sd(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
+ __ bind(&value_is_heap_object);
+ __ JumpIfSmi(cell_value_reg, &slow_case);
+ Register cell_value_map_reg = cell_value_reg;
+ __ ld(cell_value_map_reg,
+ FieldMemOperand(cell_value_reg, HeapObject::kMapOffset));
+ __ Branch(&fast_heapobject_case, eq, cell_value_map_reg,
+ FieldMemOperand(value_reg, HeapObject::kMapOffset));
+
+ // Fallback to the runtime.
+ __ bind(&slow_case);
+ __ SmiTag(slot_reg);
+ __ Push(slot_reg, value_reg);
+ __ TailCallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2, 1);
+}
+
+
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
DCHECK(static_cast<int>(offset) == offset);
diff --git a/deps/v8/src/mips64/code-stubs-mips64.h b/deps/v8/src/mips64/code-stubs-mips64.h
index 33c392b2f7..c54a3d07c5 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.h
+++ b/deps/v8/src/mips64/code-stubs-mips64.h
@@ -2,8 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_CODE_STUBS_ARM_H_
-#define V8_MIPS_CODE_STUBS_ARM_H_
+#ifndef V8_MIPS_CODE_STUBS_MIPS64_H_
+#define V8_MIPS_CODE_STUBS_MIPS64_H_
+
+#include "src/mips64/frames-mips64.h"
namespace v8 {
namespace internal {
@@ -344,4 +346,4 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
} } // namespace v8::internal
-#endif // V8_MIPS_CODE_STUBS_ARM_H_
+#endif // V8_MIPS_CODE_STUBS_MIPS64_H_
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 4f45b08018..7c61f71621 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/codegen.h"
@@ -1082,10 +1080,9 @@ CodeAgingHelper::CodeAgingHelper() {
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before MIPS simulator ICache is setup.
- SmartPointer<CodePatcher> patcher(
- new CodePatcher(young_sequence_.start(),
- young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ base::SmartPointer<CodePatcher> patcher(new CodePatcher(
+ young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->Push(ra, fp, cp, a1);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
diff --git a/deps/v8/src/mips64/codegen-mips64.h b/deps/v8/src/mips64/codegen-mips64.h
index b02ec4ff10..f79ad4e41c 100644
--- a/deps/v8/src/mips64/codegen-mips64.h
+++ b/deps/v8/src/mips64/codegen-mips64.h
@@ -14,9 +14,6 @@ namespace v8 {
namespace internal {
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
diff --git a/deps/v8/src/mips64/constants-mips64.cc b/deps/v8/src/mips64/constants-mips64.cc
index fd183a7b01..b43601c5cf 100644
--- a/deps/v8/src/mips64/constants-mips64.cc
+++ b/deps/v8/src/mips64/constants-mips64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/mips64/constants-mips64.h"
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index 0284478c08..898a4dbb1d 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -5,6 +5,10 @@
#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/globals.h"
+
// UNIMPLEMENTED_ macro for MIPS.
#ifdef DEBUG
#define UNIMPLEMENTED_MIPS() \
@@ -282,6 +286,8 @@ const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
const int64_t kHi16MaskOf64 = (int64_t)0xffff << 48;
const int64_t kSe16MaskOf64 = (int64_t)0xffff << 32;
const int64_t kTh16MaskOf64 = (int64_t)0xffff << 16;
+const int32_t kJalRawMark = 0x00000000;
+const int32_t kJRawMark = 0xf0000000;
// ----- MIPS Opcodes and Function Fields.
// We use this presentation to stay close to the table representation in
diff --git a/deps/v8/src/mips64/cpu-mips64.cc b/deps/v8/src/mips64/cpu-mips64.cc
index 9c600bfa67..6c24fd06a9 100644
--- a/deps/v8/src/mips64/cpu-mips64.cc
+++ b/deps/v8/src/mips64/cpu-mips64.cc
@@ -11,8 +11,6 @@
#include <asm/cachectl.h>
#endif // #ifdef __mips
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/assembler.h"
diff --git a/deps/v8/src/mips64/debug-mips64.cc b/deps/v8/src/mips64/debug-mips64.cc
deleted file mode 100644
index 75e37c5e77..0000000000
--- a/deps/v8/src/mips64/debug-mips64.cc
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/codegen.h"
-#include "src/debug.h"
-
-namespace v8 {
-namespace internal {
-
-void BreakLocation::SetDebugBreakAtReturn() {
- // Mips return sequence:
- // mov sp, fp
- // lw fp, sp(0)
- // lw ra, sp(4)
- // addiu sp, sp, 8
- // addiu sp, sp, N
- // jr ra
- // nop (in branch delay slot)
-
- // Make sure this constant matches the number if instructions we emit.
- DCHECK(Assembler::kJSReturnSequenceInstructions == 7);
- CodePatcher patcher(pc(), Assembler::kJSReturnSequenceInstructions);
- // li and Call pseudo-instructions emit 6 + 2 instructions.
- patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int64_t>(
- debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry())),
- ADDRESS_LOAD);
- patcher.masm()->Call(v8::internal::t9);
- // Place nop to match return sequence size.
- patcher.masm()->nop();
- // TODO(mips): Open issue about using breakpoint instruction instead of nops.
- // patcher.masm()->bkpt(0);
-}
-
-
-void BreakLocation::SetDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- // Patch the code changing the debug break slot code from:
- // nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
- // nop(DEBUG_BREAK_NOP)
- // nop(DEBUG_BREAK_NOP)
- // nop(DEBUG_BREAK_NOP)
- // nop(DEBUG_BREAK_NOP)
- // nop(DEBUG_BREAK_NOP)
- // to a call to the debug break slot code.
- // li t9, address (4-instruction sequence on mips64)
- // call t9 (jalr t9 / nop instruction pair)
- CodePatcher patcher(pc(), Assembler::kDebugBreakSlotInstructions);
- patcher.masm()->li(v8::internal::t9,
- Operand(reinterpret_cast<int64_t>(
- debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry())),
- ADDRESS_LOAD);
- patcher.masm()->Call(v8::internal::t9);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
- __ Dsubu(sp, sp,
- Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize));
- for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) {
- __ sd(at, MemOperand(sp, kPointerSize * i));
- }
- __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
- __ push(at);
-
-
- // TODO(plind): This needs to be revised to store pairs of smi's per
- // the other 64-bit arch's.
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- DCHECK((object_regs & ~kJSCallerSaved) == 0);
- DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
- DCHECK((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
- }
- if ((non_object_regs & (1 << r)) != 0) {
- __ PushRegisterAsTwoSmis(reg);
- }
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ PrepareCEntryArgs(0); // No arguments.
- __ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
-
- CEntryStub ceb(masm->isolate(), 1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ PopRegisterAsTwoSmis(reg, at);
- }
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ li(reg, kDebugZapValue);
- }
- }
-
- // Don't bother removing padding bytes pushed on the stack
- // as the frame is going to be restored right away.
-
- // Leave the internal frame.
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ li(t9, Operand(after_break_target));
- __ ld(t9, MemOperand(t9));
- __ Jump(t9);
-}
-
-
-void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallICStub
- // ----------- S t a t e -------------
- // -- a1 : function
- // -- a3 : slot in feedback array (smi)
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a3.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // In places other than IC call sites it is expected that v0 is TOS which
- // is an object - this is not generally the case so this should be used with
- // care.
- Generate_DebugBreakCallHelper(masm, v0.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-mips.cc).
- // ----------- S t a t e -------------
- // -- a1 : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-mips.cc).
- // ----------- S t a t e -------------
- // -- a0 : number of arguments (not smi)
- // -- a1 : constructor function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() , a0.bit());
-}
-
-
-
-void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
- MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-mips.cc).
- // ----------- S t a t e -------------
- // -- a0 : number of arguments (not smi)
- // -- a1 : constructor function
- // -- a2 : feedback array
- // -- a3 : feedback slot (smi)
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), a0.bit());
-}
-
-
-void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction. Avoid emitting
- // the trampoline pool in the debug break slot code.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
- __ nop(MacroAssembler::DEBUG_BREAK_NOP);
- }
- DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
- masm->InstructionsGeneratedSince(&check_codesize));
-}
-
-
-void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0);
-}
-
-
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ li(at, Operand(restarter_frame_function_slot));
- __ sw(zero_reg, MemOperand(at, 0));
-
- // We do not know our frame height, but set sp based on fp.
- __ Dsubu(sp, fp, Operand(kPointerSize));
-
- __ Pop(ra, fp, a1); // Return address, Frame, Function.
-
- // Load context from the function.
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Get function code.
- __ ld(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
- __ Daddu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Re-run JSFunction, a1 is function, cp is context.
- __ Jump(t9);
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index cf5700f334..958951a948 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/safepoint-table.h"
namespace v8 {
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index ee624db2f8..9639cef4dc 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -28,8 +28,6 @@
#include <stdio.h>
#include <string.h>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/base/platform/platform.h"
@@ -344,8 +342,10 @@ void Decoder::PrintPCImm21(Instruction* instr, int delta_pc, int n_bits) {
// Print 26-bit hex immediate value.
void Decoder::PrintXImm26(Instruction* instr) {
- uint32_t imm = static_cast<uint32_t>(instr->Imm26Value()) << kImmFieldShift;
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+ uint64_t target = static_cast<uint64_t>(instr->Imm26Value())
+ << kImmFieldShift;
+ target = (reinterpret_cast<uint64_t>(instr) & ~0xfffffff) | target;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%lx", target);
}
@@ -1073,7 +1073,7 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
Format(instr, "jr 'rs");
break;
case JALR:
- Format(instr, "jalr 'rs");
+ Format(instr, "jalr 'rs, 'rd");
break;
case SLL:
if (0x0 == static_cast<int>(instr->InstructionBits()))
@@ -1510,9 +1510,13 @@ void Decoder::DecodeTypeImmediateREGIMM(Instruction* instr) {
case BGEZ:
Format(instr, "bgez 'rs, 'imm16u -> 'imm16p4s2");
break;
- case BGEZAL:
- Format(instr, "bgezal 'rs, 'imm16u -> 'imm16p4s2");
+ case BGEZAL: {
+ if (instr->RsValue() == 0)
+ Format(instr, "bal 'imm16s -> 'imm16p4s2");
+ else
+ Format(instr, "bgezal 'rs, 'imm16u -> 'imm16p4s2");
break;
+ }
case BGEZALL:
Format(instr, "bgezall 'rs, 'imm16u -> 'imm16p4s2");
break;
diff --git a/deps/v8/src/mips64/frames-mips64.cc b/deps/v8/src/mips64/frames-mips64.cc
index 2f0436184e..5427367d47 100644
--- a/deps/v8/src/mips64/frames-mips64.cc
+++ b/deps/v8/src/mips64/frames-mips64.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/assembler.h"
diff --git a/deps/v8/src/mips64/frames-mips64.h b/deps/v8/src/mips64/frames-mips64.h
index 4434a98b7f..9b6d326275 100644
--- a/deps/v8/src/mips64/frames-mips64.h
+++ b/deps/v8/src/mips64/frames-mips64.h
@@ -169,12 +169,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
} } // namespace v8::internal
#endif
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 689bbbb9ee..6f1201d26a 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/interface-descriptors.h"
@@ -36,7 +34,11 @@ const Register VectorStoreICDescriptor::VectorRegister() { return a3; }
const Register StoreTransitionDescriptor::MapRegister() { return a3; }
-const Register ElementTransitionAndStoreDescriptor::MapRegister() { return a3; }
+const Register LoadGlobalViaContextDescriptor::SlotRegister() { return a2; }
+
+
+const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
+const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
const Register InstanceofDescriptor::left() { return a0; }
@@ -62,6 +64,14 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+void StoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2};
@@ -83,6 +93,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
}
+// static
+const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
+
+
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a0};
@@ -158,11 +172,11 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
- // a3 : (only if a2 is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // a3 : slot in feedback vector (Smi, for RecordCallTarget)
+ // a4 : original constructor (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {a0, a1, a2};
+ Register registers[] = {a0, a1, a4, a2};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
@@ -330,11 +344,22 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
+void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a1, // math rounding function
+ a3, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void MathRoundVariantCallFromOptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
a1, // math rounding function
a3, // vector slot id
+ a2, // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips64/lithium-codegen-mips64.cc b/deps/v8/src/mips64/lithium-codegen-mips64.cc
index 1273d856cc..77813d50cb 100644
--- a/deps/v8/src/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/mips64/lithium-codegen-mips64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
@@ -105,7 +103,7 @@ bool LCodeGen::GeneratePrologue() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop_at");
}
#endif
@@ -407,6 +405,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
+ AllowDeferredHandleDereference get_number;
DCHECK(literal->IsNumber());
__ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
} else if (r.IsSmi()) {
@@ -634,15 +633,23 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
if (op->IsStackSlot()) {
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
if (is_tagged) {
- translation->StoreStackSlot(op->index());
+ translation->StoreStackSlot(index);
} else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
+ translation->StoreUint32StackSlot(index);
} else {
- translation->StoreInt32StackSlot(op->index());
+ translation->StoreInt32StackSlot(index);
}
} else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
+ translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -1214,7 +1221,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// On MIPS div is asynchronous - it will run in the background while we
// check for special cases.
- __ Ddiv(result, dividend, divisor);
+ __ Div(result, dividend, divisor);
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
@@ -2281,6 +2288,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
}
+ if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ // SIMD value -> true.
+ const Register scratch = scratch1();
+ __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(instr->TrueLabel(chunk_), eq, scratch,
+ Operand(SIMD128_VALUE_TYPE));
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
@@ -2983,13 +2998,31 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
- PREMONOMORPHIC).code();
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+ SLOPPY, PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->result()).is(v0));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub =
+ CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3101,7 +3134,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3204,10 +3237,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
: element_size_shift;
int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
FPURegister result = ToDoubleRegister(instr->result());
if (key_is_constant) {
__ Daddu(scratch0(), external_pointer,
@@ -3224,8 +3254,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
}
__ Daddu(scratch0(), scratch0(), external_pointer);
}
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
__ lwc1(result, MemOperand(scratch0(), base_offset));
__ cvt_d_s(result, result);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
@@ -3237,29 +3266,22 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
key, external_pointer, key_is_constant, constant_key,
element_size_shift, shift_size, base_offset);
switch (elements_kind) {
- case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
__ lb(result, mem_operand);
break;
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ lbu(result, mem_operand);
break;
- case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
__ lh(result, mem_operand);
break;
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
__ lhu(result, mem_operand);
break;
- case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
__ lw(result, mem_operand);
break;
- case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
@@ -3269,8 +3291,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -3408,7 +3428,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3672,9 +3692,8 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
__ li(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
- // The context is the first argument.
- __ Push(cp, scratch0(), scratch1());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ __ Push(scratch0(), scratch1());
+ CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}
@@ -4372,6 +4391,30 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->value())
+ .is(StoreGlobalViaContextDescriptor::ValueRegister()));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
+ isolate(), depth, instr->language_mode())
+ .code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
Operand operand((int64_t)0);
@@ -4415,10 +4458,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
: element_size_shift;
int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
Register address = scratch0();
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
@@ -4441,8 +4481,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
__ Daddu(address, external_pointer, address);
}
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
__ swc1(double_scratch0(), MemOperand(address, base_offset));
} else { // Storing doubles, not floats.
@@ -4455,30 +4494,21 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
element_size_shift, shift_size,
base_offset);
switch (elements_kind) {
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_INT8_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case INT8_ELEMENTS:
__ sb(value, mem_operand);
break;
- case EXTERNAL_INT16_ELEMENTS:
- case EXTERNAL_UINT16_ELEMENTS:
case INT16_ELEMENTS:
case UINT16_ELEMENTS:
__ sh(value, mem_operand);
break;
- case EXTERNAL_INT32_ELEMENTS:
- case EXTERNAL_UINT32_ELEMENTS:
case INT32_ELEMENTS:
case UINT32_ELEMENTS:
__ sw(value, mem_operand);
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -4605,7 +4635,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases: external, fast double
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -5834,15 +5864,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, input, scratch);
- __ Branch(USE_DELAY_SLOT, false_label,
- ge, scratch, Operand(FIRST_NONSTRING_TYPE));
- // input is an object so we can load the BitFieldOffset even if we take the
- // other branch.
- __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
- __ And(at, at, 1 << Map::kIsUndetectable);
- *cmp1 = at;
- *cmp2 = Operand(zero_reg);
- final_branch_condition = eq;
+ *cmp1 = scratch;
+ *cmp2 = Operand(FIRST_NONSTRING_TYPE);
+ final_branch_condition = lt;
} else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label);
@@ -5900,6 +5924,20 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
*cmp2 = Operand(zero_reg);
final_branch_condition = eq;
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(type_name, factory->type##_string())) { \
+ __ JumpIfSmi(input, false_label); \
+ __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset)); \
+ __ LoadRoot(at, Heap::k##Type##MapRootIndex); \
+ *cmp1 = input; \
+ *cmp2 = Operand(at); \
+ final_branch_condition = eq;
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+
+
} else {
*cmp1 = at;
*cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
diff --git a/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc b/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc
index 1006d72a4e..9e3114bc34 100644
--- a/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc
+++ b/deps/v8/src/mips64/lithium-gap-resolver-mips64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/mips64/lithium-codegen-mips64.h"
#include "src/mips64/lithium-gap-resolver-mips64.h"
diff --git a/deps/v8/src/mips64/lithium-gap-resolver-mips64.h b/deps/v8/src/mips64/lithium-gap-resolver-mips64.h
index ab950559c5..7374da7727 100644
--- a/deps/v8/src/mips64/lithium-gap-resolver-mips64.h
+++ b/deps/v8/src/mips64/lithium-gap-resolver-mips64.h
@@ -5,8 +5,6 @@
#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-#include "src/v8.h"
-
#include "src/lithium.h"
namespace v8 {
diff --git a/deps/v8/src/mips64/lithium-mips64.cc b/deps/v8/src/mips64/lithium-mips64.cc
index 3df4defc7a..26a03fcc67 100644
--- a/deps/v8/src/mips64/lithium-mips64.cc
+++ b/deps/v8/src/mips64/lithium-mips64.cc
@@ -4,8 +4,6 @@
#include <sstream>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/hydrogen-osr.h"
@@ -337,6 +335,11 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
+void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d", depth(), slot_index());
+}
+
+
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -355,6 +358,12 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
+ value()->PrintTo(stream);
+}
+
+
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -1616,8 +1625,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsExternal()) {
- DCHECK(instr->left()->representation().IsExternal());
- DCHECK(instr->right()->representation().IsInteger32());
+ DCHECK(instr->IsConsistentExternalRepresentation());
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
@@ -2100,6 +2108,15 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
+ HLoadGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ DCHECK(instr->slot_index() > 0);
+ LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2168,7 +2185,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LInstruction* result = NULL;
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseRegister(instr->elements());
@@ -2189,10 +2206,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
bool needs_environment;
- if (instr->is_external() || instr->is_fixed_typed_array()) {
+ if (instr->is_fixed_typed_array()) {
// see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
- elements_kind == UINT32_ELEMENTS) &&
+ needs_environment = elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32);
} else {
// see LCodeGen::DoLoadKeyedFixedDoubleArray and
@@ -2227,7 +2243,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
@@ -2260,10 +2276,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
- DCHECK((instr->is_fixed_typed_array() &&
- instr->elements()->representation().IsTagged()) ||
- (instr->is_external() &&
- instr->elements()->representation().IsExternal()));
+ DCHECK(instr->elements()->representation().IsExternal());
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
@@ -2389,6 +2402,19 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
+ HStoreGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* value = UseFixed(instr->value(),
+ StoreGlobalViaContextDescriptor::ValueRegister());
+ DCHECK(instr->slot_index() > 0);
+
+ LStoreGlobalViaContext* result =
+ new (zone()) LStoreGlobalViaContext(context, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
diff --git a/deps/v8/src/mips64/lithium-mips64.h b/deps/v8/src/mips64/lithium-mips64.h
index cb1f56ecc6..71ce5496ac 100644
--- a/deps/v8/src/mips64/lithium-mips64.h
+++ b/deps/v8/src/mips64/lithium-mips64.h
@@ -104,6 +104,7 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
+ V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -144,6 +145,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
+ V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1670,15 +1672,9 @@ class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- bool is_external() const {
- return hydrogen()->is_external();
- }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1725,7 +1721,23 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
+ TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ LOperand* context() { return inputs_[0]; }
+
+ int depth() const { return hydrogen()->depth(); }
+ int slot_index() const { return hydrogen()->slot_index(); }
};
@@ -2214,6 +2226,28 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
+class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreGlobalViaContext(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
+ "store-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int depth() { return hydrogen()->depth(); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
@@ -2222,13 +2256,9 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
inputs_[2] = value;
}
- bool is_external() const { return hydrogen()->is_external(); }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index f7a77dd1b1..006f15b967 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -4,15 +4,13 @@
#include <limits.h> // For LONG_MIN, LONG_MAX.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -167,6 +165,9 @@ void MacroAssembler::InNewSpace(Register object,
}
+// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
+// The register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
void MacroAssembler::RecordWriteField(
Register object,
int offset,
@@ -220,8 +221,7 @@ void MacroAssembler::RecordWriteField(
}
-// Will clobber 4 registers: object, map, dst, ip. The
-// register 'object' contains a heap object pointer.
+// Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
void MacroAssembler::RecordWriteForMap(Register object,
Register map,
Register dst,
@@ -295,8 +295,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
}
-// Will clobber 4 registers: object, address, scratch, ip. The
-// register 'object' contains a heap object pointer. The heap object
+// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
+// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
Register object,
@@ -2755,7 +2755,7 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- J(L, bdslot);
+ Jal(L, bdslot);
bind(&skip);
}
} else {
@@ -3193,15 +3193,12 @@ void MacroAssembler::Ret(Condition cond,
void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
-
- uint64_t imm28;
- imm28 = jump_address(L);
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- j(imm28);
+ j(L);
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT) nop();
@@ -3210,15 +3207,12 @@ void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
void MacroAssembler::Jal(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
-
- uint64_t imm28;
- imm28 = jump_address(L);
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- jal(imm28);
+ jal(L);
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT) nop();
@@ -3361,10 +3355,11 @@ void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
void MacroAssembler::DebugBreak() {
PrepareCEntryArgs(0);
- PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
+ PrepareCEntryFunction(
+ ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
CEntryStub ces(isolate(), 1);
DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
@@ -3569,26 +3564,6 @@ void MacroAssembler::Allocate(Register object_size,
}
-void MacroAssembler::UndoAllocationInNewSpace(Register object,
- Register scratch) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- And(object, object, Operand(~kHeapObjectTagMask));
-#ifdef DEBUG
- // Check that the object un-allocated is below the current top.
- li(scratch, Operand(new_space_allocation_top));
- ld(scratch, MemOperand(scratch));
- Check(less, kUndoAllocationOfNonAllocatedMemory,
- object, Operand(scratch));
-#endif
- // Write the address of the object to un-allocate as the current top.
- li(scratch, Operand(new_space_allocation_top));
- sd(object, MemOperand(scratch));
-}
-
-
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -4716,9 +4691,9 @@ void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
}
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments,
- SaveFPRegsMode save_doubles) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles,
+ BranchDelaySlot bd) {
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -4733,7 +4708,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference(f, isolate()));
CEntryStub stub(isolate(), 1, save_doubles);
- CallStub(&stub);
+ CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
}
@@ -6236,19 +6211,28 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
DCHECK(!scratch1.is(scratch0));
Factory* factory = isolate()->factory();
Register current = scratch0;
- Label loop_again;
+ Label loop_again, end;
// Scratch contained elements pointer.
Move(current, object);
+ ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ Branch(&end, eq, current, Operand(factory->null_value()));
// Loop based on the map going up the prototype chain.
bind(&loop_again);
ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+ Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
Branch(&loop_again, ne, current, Operand(factory->null_value()));
+
+ bind(&end);
}
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 7de3300908..f2d36e22e2 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -12,6 +12,19 @@
namespace v8 {
namespace internal {
+// Give alias names to registers for calling conventions.
+const Register kReturnRegister0 = {kRegister_v0_Code};
+const Register kReturnRegister1 = {kRegister_v1_Code};
+const Register kJSFunctionRegister = {kRegister_a1_Code};
+const Register kContextRegister = {kRegister_s7_Code};
+const Register kInterpreterAccumulatorRegister = {kRegister_v0_Code};
+const Register kInterpreterRegisterFileRegister = {kRegister_a7_Code};
+const Register kInterpreterBytecodeOffsetRegister = {kRegister_t0_Code};
+const Register kInterpreterBytecodeArrayRegister = {kRegister_t1_Code};
+const Register kInterpreterDispatchTableRegister = {kRegister_t2_Code};
+const Register kRuntimeCallFunctionRegister = {kRegister_a1_Code};
+const Register kRuntimeCallArgCountRegister = {kRegister_a0_Code};
+
// Forward declaration.
class JumpTarget;
@@ -530,13 +543,6 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. The caller must make sure that no pointers
- // are left to the object(s) no longer allocated as they would be invalid when
- // allocation is undone.
- void UndoAllocationInNewSpace(Register object, Register scratch);
-
-
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -706,6 +712,17 @@ class MacroAssembler: public Assembler {
sd(src4, MemOperand(sp, 0 * kPointerSize));
}
+ // Push five registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ Dsubu(sp, sp, Operand(5 * kPointerSize));
+ sd(src1, MemOperand(sp, 4 * kPointerSize));
+ sd(src2, MemOperand(sp, 3 * kPointerSize));
+ sd(src3, MemOperand(sp, 2 * kPointerSize));
+ sd(src4, MemOperand(sp, 1 * kPointerSize));
+ sd(src5, MemOperand(sp, 0 * kPointerSize));
+ }
+
void Push(Register src, Condition cond, Register tst1, Register tst2) {
// Since we don't have conditional execution we use a Branch.
Branch(3, cond, tst1, Operand(tst2));
@@ -1275,19 +1292,19 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void CallJSExitStub(CodeStub* stub);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f,
- int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+ BranchDelaySlot bd = PROTECT);
void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+ BranchDelaySlot bd = PROTECT) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles, bd);
}
// Convenience function: call an external reference.
@@ -1796,7 +1813,7 @@ class CodePatcher {
CodePatcher(byte* address,
int instructions,
FlushICache flush_cache = FLUSH);
- virtual ~CodePatcher();
+ ~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 2382f44fb8..9a0d8fdce8 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -7,8 +7,6 @@
#include <stdlib.h>
#include <cmath>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
#include "src/assembler.h"
@@ -1802,9 +1800,15 @@ void Simulator::WriteB(int64_t addr, int8_t value) {
// Returns the limit of the stack area to enable checking for stack overflows.
-uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
- // pushing values.
+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
+ // The simulator uses a separate JS stack. If we have exhausted the C stack,
+ // we also drop down the JS limit to reflect the exhaustion on the JS stack.
+ if (GetCurrentStackPosition() < c_limit) {
+ return reinterpret_cast<uintptr_t>(get_sp());
+ }
+
+ // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
+ // to prevent overrunning the stack when pushing values.
return reinterpret_cast<uintptr_t>(stack_) + 1024;
}
@@ -3791,17 +3795,20 @@ void Simulator::DecodeTypeRegisterSPECIAL(
break;
case DSLL:
set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
break;
case DIV:
- case DDIV:
+ case DDIV: {
+ const int64_t int_min_value =
+ instr->FunctionFieldRaw() == DIV ? INT_MIN : LONG_MIN;
switch (kArchVariant) {
case kMips64r2:
// Divide by zero and overflow was not checked in the
// configuration step - div and divu do not raise exceptions. On
// division by 0 the result will be UNPREDICTABLE. On overflow
// (INT_MIN/-1), return INT_MIN which is what the hardware does.
- if (rs == INT_MIN && rt == -1) {
- set_register(LO, INT_MIN);
+ if (rs == int_min_value && rt == -1) {
+ set_register(LO, int_min_value);
set_register(HI, 0);
} else if (rt != 0) {
set_register(LO, rs / rt);
@@ -3811,14 +3818,14 @@ void Simulator::DecodeTypeRegisterSPECIAL(
case kMips64r6:
switch (instr->SaValue()) {
case DIV_OP:
- if (rs == INT_MIN && rt == -1) {
- set_register(rd_reg, INT_MIN);
+ if (rs == int_min_value && rt == -1) {
+ set_register(rd_reg, int_min_value);
} else if (rt != 0) {
set_register(rd_reg, rs / rt);
}
break;
case MOD_OP:
- if (rs == INT_MIN && rt == -1) {
+ if (rs == int_min_value && rt == -1) {
set_register(rd_reg, 0);
} else if (rt != 0) {
set_register(rd_reg, rs % rt);
@@ -3833,6 +3840,7 @@ void Simulator::DecodeTypeRegisterSPECIAL(
break;
}
break;
+ }
case DIVU:
if (rt_u != 0) {
set_register(LO, rs_u / rt_u);
@@ -4412,7 +4420,6 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
UNREACHABLE();
}
-
// ---------- Raise exceptions triggered.
SignalExceptions();
@@ -4619,6 +4626,9 @@ void Simulator::Execute() {
void Simulator::CallInternal(byte* entry) {
+ // Adjust JS-based stack limit to C-based stack limit.
+ isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
// Prepare to execute the code at entry.
set_register(pc, reinterpret_cast<int64_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index 346d3584f4..dea9e30adf 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -223,12 +223,12 @@ class Simulator {
void set_pc(int64_t value);
int64_t get_pc() const;
- Address get_sp() {
+ Address get_sp() const {
return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
}
// Accessor to the internal simulator stack area.
- uintptr_t StackLimit() const;
+ uintptr_t StackLimit(uintptr_t c_limit) const;
// Executes MIPS instructions until the PC reaches end_sim_pc.
void Execute();
@@ -411,6 +411,7 @@ class Simulator {
instr->OpcodeValue());
}
InstructionDecode(instr);
+ SNPrintF(trace_buf_, " ");
}
// ICache.
@@ -508,15 +509,14 @@ class Simulator {
// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. Setting the c_limit to indicate a very small
-// stack cause stack overflow errors, since the simulator ignores the input.
-// This is unlikely to be an issue in practice, though it might cause testing
-// trouble down the line.
+// the C-based native code. The JS-based limit normally points near the end of
+// the simulator stack. When the C-based limit is exhausted we reflect that by
+// lowering the JS-based limit as well, to make stack checks trigger.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit();
+ return Simulator::current(isolate)->StackLimit(c_limit);
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
diff --git a/deps/v8/src/modules.cc b/deps/v8/src/modules.cc
index 2e6cfc0723..f72693cd66 100644
--- a/deps/v8/src/modules.cc
+++ b/deps/v8/src/modules.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/modules.h"
#include "src/ast-value-factory.h"
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index 9c49fd38fb..af01a18a8f 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $observeNotifyChange;
var $observeEnqueueSpliceRecord;
var $observeBeginPerformSplice;
var $observeEndPerformSplice;
-var $observeNativeObjectObserve;
-var $observeNativeObjectGetNotifier;
-var $observeNativeObjectNotifierPerformChange;
+
+var $observeObjectMethods;
+var $observeArrayMethods;
(function(global, utils) {
@@ -680,27 +679,43 @@ function ObserveMicrotaskRunner() {
// -------------------------------------------------------------------
-utils.InstallFunctions(GlobalObject, DONT_ENUM, [
+utils.InstallFunctions(notifierPrototype, DONT_ENUM, [
+ "notify", ObjectNotifierNotify,
+ "performChange", ObjectNotifierPerformChange
+]);
+
+$observeObjectMethods = [
"deliverChangeRecords", ObjectDeliverChangeRecords,
"getNotifier", ObjectGetNotifier,
"observe", ObjectObserve,
"unobserve", ObjectUnobserve
-]);
-utils.InstallFunctions(GlobalArray, DONT_ENUM, [
+];
+$observeArrayMethods = [
"observe", ArrayObserve,
"unobserve", ArrayUnobserve
-]);
-utils.InstallFunctions(notifierPrototype, DONT_ENUM, [
- "notify", ObjectNotifierNotify,
- "performChange", ObjectNotifierPerformChange
-]);
+];
+
+// TODO(adamk): Figure out why this prototype removal has to
+// happen as part of initial snapshotting.
+var removePrototypeFn = function(f, i) {
+ if (i % 2 === 1) %FunctionRemovePrototype(f);
+};
+$observeObjectMethods.forEach(removePrototypeFn);
+$observeArrayMethods.forEach(removePrototypeFn);
-$observeNotifyChange = NotifyChange;
$observeEnqueueSpliceRecord = EnqueueSpliceRecord;
$observeBeginPerformSplice = BeginPerformSplice;
$observeEndPerformSplice = EndPerformSplice;
-$observeNativeObjectObserve = NativeObjectObserve;
-$observeNativeObjectGetNotifier = NativeObjectGetNotifier;
-$observeNativeObjectNotifierPerformChange = NativeObjectNotifierPerformChange;
+
+utils.ExportToRuntime(function(to) {
+ to.ObserveNotifyChange = NotifyChange;
+ to.ObserveEnqueueSpliceRecord = EnqueueSpliceRecord;
+ to.ObserveBeginPerformSplice = BeginPerformSplice;
+ to.ObserveEndPerformSplice = EndPerformSplice;
+ to.ObserveNativeObjectObserve = NativeObjectObserve;
+ to.ObserveNativeObjectGetNotifier = NativeObjectGetNotifier;
+ to.ObserveNativeObjectNotifierPerformChange =
+ NativeObjectNotifierPerformChange;
+});
})
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 3474ebd8f7..815a5b53f8 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -4,12 +4,12 @@
#include "src/v8.h"
+#include "src/bootstrapper.h"
#include "src/disasm.h"
#include "src/disassembler.h"
-#include "src/heap/objects-visiting.h"
-#include "src/jsregexp.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
+#include "src/regexp/jsregexp.h"
namespace v8 {
namespace internal {
@@ -58,8 +58,8 @@ void HeapObject::HeapObjectVerify() {
case MUTABLE_HEAP_NUMBER_TYPE:
HeapNumber::cast(this)->HeapNumberVerify();
break;
- case FLOAT32X4_TYPE:
- Float32x4::cast(this)->Float32x4Verify();
+ case SIMD128_VALUE_TYPE:
+ Simd128Value::cast(this)->Simd128ValueVerify();
break;
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayVerify();
@@ -70,14 +70,14 @@ void HeapObject::HeapObjectVerify() {
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayVerify();
break;
+ case BYTECODE_ARRAY_TYPE:
+ BytecodeArray::cast(this)->BytecodeArrayVerify();
+ break;
case FREE_SPACE_TYPE:
FreeSpace::cast(this)->FreeSpaceVerify();
break;
#define VERIFY_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ARRAY_TYPE: \
- External##Type##Array::cast(this)->External##Type##ArrayVerify(); \
- break; \
case FIXED_##TYPE##_ARRAY_TYPE: \
Fixed##Type##Array::cast(this)->FixedTypedArrayVerify(); \
break;
@@ -214,7 +214,7 @@ void HeapNumber::HeapNumberVerify() {
}
-void Float32x4::Float32x4Verify() { CHECK(IsFloat32x4()); }
+void Simd128Value::Simd128ValueVerify() { CHECK(IsSimd128Value()); }
void ByteArray::ByteArrayVerify() {
@@ -222,18 +222,15 @@ void ByteArray::ByteArrayVerify() {
}
-void FreeSpace::FreeSpaceVerify() {
- CHECK(IsFreeSpace());
+void BytecodeArray::BytecodeArrayVerify() {
+ // TODO(oth): Walk bytecodes and immediate values to validate sanity.
+ CHECK(IsBytecodeArray());
}
-#define EXTERNAL_ARRAY_VERIFY(Type, type, TYPE, ctype, size) \
- void External##Type##Array::External##Type##ArrayVerify() { \
- CHECK(IsExternal##Type##Array()); \
- }
-
-TYPED_ARRAYS(EXTERNAL_ARRAY_VERIFY)
-#undef EXTERNAL_ARRAY_VERIFY
+void FreeSpace::FreeSpaceVerify() {
+ CHECK(IsFreeSpace());
+}
template <class Traits>
@@ -241,7 +238,12 @@ void FixedTypedArray<Traits>::FixedTypedArrayVerify() {
CHECK(IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() ==
Traits::kInstanceType);
- CHECK(base_pointer() == this);
+ if (base_pointer() == this) {
+ CHECK(external_pointer() ==
+ ExternalReference::fixed_typed_array_base_data_offset().address());
+ } else {
+ CHECK(base_pointer() == nullptr);
+ }
}
@@ -263,7 +265,7 @@ void JSObject::JSObjectVerify() {
}
if (HasFastProperties()) {
- int actual_unused_property_fields = map()->inobject_properties() +
+ int actual_unused_property_fields = map()->GetInObjectProperties() +
properties()->length() -
map()->NextFreePropertyIndex();
if (map()->unused_property_fields() != actual_unused_property_fields) {
@@ -334,7 +336,6 @@ void Map::DictionaryMapVerify() {
MapVerify();
CHECK(is_dictionary_map());
CHECK(instance_descriptors()->IsEmpty());
- CHECK_EQ(0, pre_allocated_property_fields());
CHECK_EQ(0, unused_property_fields());
CHECK_EQ(StaticVisitorBase::GetVisitorId(this), visitor_id());
}
@@ -541,6 +542,8 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
VerifyObjectField(kFeedbackVectorOffset);
VerifyObjectField(kScopeInfoOffset);
VerifyObjectField(kInstanceClassNameOffset);
+ CHECK(function_data()->IsUndefined() || IsApiFunction() ||
+ HasBuiltinFunctionId() || HasBytecodeArray());
VerifyObjectField(kFunctionDataOffset);
VerifyObjectField(kScriptOffset);
VerifyObjectField(kDebugInfoOffset);
@@ -986,32 +989,6 @@ void Script::ScriptVerify() {
}
-void JSFunctionResultCache::JSFunctionResultCacheVerify() {
- JSFunction::cast(get(kFactoryIndex))->ObjectVerify();
-
- int size = Smi::cast(get(kCacheSizeIndex))->value();
- CHECK(kEntriesIndex <= size);
- CHECK(size <= length());
- CHECK_EQ(0, size % kEntrySize);
-
- int finger = Smi::cast(get(kFingerIndex))->value();
- CHECK(kEntriesIndex <= finger);
- CHECK((finger < size) || (finger == kEntriesIndex && finger == size));
- CHECK_EQ(0, finger % kEntrySize);
-
- if (FLAG_enable_slow_asserts) {
- for (int i = kEntriesIndex; i < size; i++) {
- CHECK(!get(i)->IsTheHole());
- get(i)->ObjectVerify();
- }
- for (int i = size; i < length(); i++) {
- CHECK(get(i)->IsTheHole());
- get(i)->ObjectVerify();
- }
- }
-}
-
-
void NormalizedMapCache::NormalizedMapCacheVerify() {
FixedArray::cast(this)->FixedArrayVerify();
if (FLAG_enable_slow_asserts) {
@@ -1030,7 +1007,6 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
void DebugInfo::DebugInfoVerify() {
CHECK(IsDebugInfo());
VerifyPointer(shared());
- VerifyPointer(original_code());
VerifyPointer(code());
VerifyPointer(break_points());
}
@@ -1087,7 +1063,6 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
}
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -1276,6 +1251,62 @@ void Code::VerifyEmbeddedObjects(VerifyMode mode) {
}
+// Verify that the debugger can redirect old code to the new code.
+void Code::VerifyRecompiledCode(Code* old_code, Code* new_code) {
+ if (old_code->kind() != FUNCTION) return;
+ if (new_code->kind() != FUNCTION) return;
+ Isolate* isolate = old_code->GetIsolate();
+ // Do not verify during bootstrapping. We may replace code using %SetCode.
+ if (isolate->bootstrapper()->IsActive()) return;
+
+ static const int mask = RelocInfo::kCodeTargetMask;
+ RelocIterator old_it(old_code, mask);
+ RelocIterator new_it(new_code, mask);
+ Code* stack_check = isolate->builtins()->builtin(Builtins::kStackCheck);
+
+ while (!old_it.done()) {
+ RelocInfo* rinfo = old_it.rinfo();
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ CHECK(!target->is_handler() && !target->is_inline_cache_stub());
+ if (target == stack_check) break;
+ old_it.next();
+ }
+
+ while (!new_it.done()) {
+ RelocInfo* rinfo = new_it.rinfo();
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ CHECK(!target->is_handler() && !target->is_inline_cache_stub());
+ if (target == stack_check) break;
+ new_it.next();
+ }
+
+ // Either both are done because there is no stack check.
+ // Or we are past the prologue for both.
+ CHECK_EQ(new_it.done(), old_it.done());
+
+ // After the prologue, each call in the old code has a corresponding call
+ // in the new code.
+ while (!old_it.done() && !new_it.done()) {
+ Code* old_target =
+ Code::GetCodeFromTargetAddress(old_it.rinfo()->target_address());
+ Code* new_target =
+ Code::GetCodeFromTargetAddress(new_it.rinfo()->target_address());
+ CHECK_EQ(old_target->kind(), new_target->kind());
+ // Check call target for equality unless it's an IC or an interrupt check.
+ // In both cases they may be patched to be something else.
+ if (!old_target->is_handler() && !old_target->is_inline_cache_stub() &&
+ new_target == isolate->builtins()->builtin(Builtins::kInterruptCheck)) {
+ CHECK_EQ(old_target, new_target);
+ }
+ old_it.next();
+ new_it.next();
+ }
+
+ // Both are done at the same time.
+ CHECK_EQ(new_it.done(), old_it.done());
+}
+
+
#endif // DEBUG
} // namespace internal
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 3caf52bff4..b3713b644c 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -20,10 +20,6 @@
#include "src/field-index-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
-#include "src/heap/incremental-marking.h"
-#include "src/heap/objects-visiting.h"
-#include "src/heap/spaces.h"
-#include "src/heap/store-buffer.h"
#include "src/isolate.h"
#include "src/layout-descriptor-inl.h"
#include "src/lookup.h"
@@ -32,6 +28,7 @@
#include "src/prototype.h"
#include "src/transitions-inl.h"
#include "src/type-feedback-vector-inl.h"
+#include "src/types-inl.h"
#include "src/v8memory.h"
namespace v8 {
@@ -141,8 +138,7 @@ int PropertyDetails::field_width_in_words() const {
bool Object::IsFixedArrayBase() const {
- return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase() ||
- IsExternalArray();
+ return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
}
@@ -157,20 +153,20 @@ bool Object::IsExternal() const {
bool Object::IsAccessorInfo() const { return IsExecutableAccessorInfo(); }
-bool Object::IsSmi() const {
- return HAS_SMI_TAG(this);
-}
-
-
-bool Object::IsHeapObject() const {
- return Internals::HasHeapObjectTag(this);
-}
-
-
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
-TYPE_CHECKER(Float32x4, FLOAT32X4_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
+TYPE_CHECKER(Simd128Value, SIMD128_VALUE_TYPE)
+
+
+#define SIMD128_TYPE_CHECKER(TYPE, Type, type, lane_count, lane_type) \
+ bool Object::Is##Type() const { \
+ return Object::IsHeapObject() && \
+ HeapObject::cast(this)->map() == \
+ HeapObject::cast(this)->GetHeap()->type##_map(); \
+ }
+SIMD128_TYPES(SIMD128_TYPE_CHECKER)
+#undef SIMD128_TYPE_CHECKER
bool Object::IsString() const {
@@ -180,7 +176,9 @@ bool Object::IsString() const {
bool Object::IsName() const {
- return IsString() || IsSymbol();
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map()->instance_type() <= LAST_NAME_TYPE;
}
@@ -270,8 +268,7 @@ bool Object::IsExternalTwoByteString() const {
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray.
- return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray() ||
- IsFixedTypedArrayBase();
+ return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
}
@@ -648,6 +645,7 @@ bool Object::IsNumber() const {
TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
+TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
@@ -658,18 +656,8 @@ bool Object::IsFiller() const {
}
-bool Object::IsExternalArray() const {
- if (!Object::IsHeapObject())
- return false;
- InstanceType instance_type =
- HeapObject::cast(this)->map()->instance_type();
- return (instance_type >= FIRST_EXTERNAL_ARRAY_TYPE &&
- instance_type <= LAST_EXTERNAL_ARRAY_TYPE);
-}
-
#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
- TYPE_CHECKER(External##Type##Array, EXTERNAL_##TYPE##_ARRAY_TYPE) \
TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
@@ -695,8 +683,7 @@ bool Object::IsJSReceiver() const {
bool Object::IsJSObject() const {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- return IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
+ return IsHeapObject() && HeapObject::cast(this)->map()->IsJSObjectMap();
}
@@ -922,28 +909,6 @@ bool Object::IsStringTable() const {
}
-bool Object::IsJSFunctionResultCache() const {
- if (!IsFixedArray()) return false;
- const FixedArray* self = FixedArray::cast(this);
- int length = self->length();
- if (length < JSFunctionResultCache::kEntriesIndex) return false;
- if ((length - JSFunctionResultCache::kEntriesIndex)
- % JSFunctionResultCache::kEntrySize != 0) {
- return false;
- }
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- // TODO(svenpanne) We use const_cast here and below to break our dependency
- // cycle between the predicates and the verifiers. This can be removed when
- // the verifiers are const-correct, too.
- reinterpret_cast<JSFunctionResultCache*>(const_cast<Object*>(this))->
- JSFunctionResultCacheVerify();
- }
-#endif
- return true;
-}
-
-
bool Object::IsNormalizedMapCache() const {
return NormalizedMapCache::IsNormalizedMapCache(this);
}
@@ -1012,7 +977,7 @@ bool Object::IsOrderedHashMap() const {
bool Object::IsPrimitive() const {
- return IsOddball() || IsNumber() || IsString();
+ return IsSmi() || HeapObject::cast(this)->map()->IsPrimitiveMap();
}
@@ -1132,16 +1097,41 @@ bool Object::IsMinusZero() const {
}
-MaybeHandle<Smi> Object::ToSmi(Isolate* isolate, Handle<Object> object) {
- if (object->IsSmi()) return Handle<Smi>::cast(object);
- if (object->IsHeapNumber()) {
- double value = Handle<HeapNumber>::cast(object)->value();
- int int_value = FastD2I(value);
- if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
- return handle(Smi::FromInt(int_value), isolate);
- }
+Representation Object::OptimalRepresentation() {
+ if (!FLAG_track_fields) return Representation::Tagged();
+ if (IsSmi()) {
+ return Representation::Smi();
+ } else if (FLAG_track_double_fields && IsHeapNumber()) {
+ return Representation::Double();
+ } else if (FLAG_track_computed_fields && IsUninitialized()) {
+ return Representation::None();
+ } else if (FLAG_track_heap_object_fields) {
+ DCHECK(IsHeapObject());
+ return Representation::HeapObject();
+ } else {
+ return Representation::Tagged();
}
- return Handle<Smi>();
+}
+
+
+ElementsKind Object::OptimalElementsKind() {
+ if (IsSmi()) return FAST_SMI_ELEMENTS;
+ if (IsNumber()) return FAST_DOUBLE_ELEMENTS;
+ return FAST_ELEMENTS;
+}
+
+
+bool Object::FitsRepresentation(Representation representation) {
+ if (FLAG_track_fields && representation.IsNone()) {
+ return false;
+ } else if (FLAG_track_fields && representation.IsSmi()) {
+ return IsSmi();
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ return IsMutableHeapNumber() || IsNumber();
+ } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ return IsHeapObject();
+ }
+ return true;
}
@@ -1173,6 +1163,14 @@ MaybeHandle<Object> Object::GetElement(Isolate* isolate, Handle<Object> object,
}
+MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
+ uint32_t index, Handle<Object> value,
+ LanguageMode language_mode) {
+ LookupIterator it(isolate, object, index);
+ return SetProperty(&it, value, language_mode, MAY_BE_STORE_FROM_KEYED);
+}
+
+
Handle<Object> Object::GetPrototypeSkipHiddenPrototypes(
Isolate* isolate, Handle<Object> receiver) {
PrototypeIterator iter(isolate, receiver);
@@ -1260,6 +1258,30 @@ MaybeHandle<Object> Object::GetProperty(Isolate* isolate, Handle<Object> object,
#define WRITE_INTPTR_FIELD(p, offset, value) \
(*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
+#define READ_UINT8_FIELD(p, offset) \
+ (*reinterpret_cast<const uint8_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_UINT8_FIELD(p, offset, value) \
+ (*reinterpret_cast<uint8_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_INT8_FIELD(p, offset) \
+ (*reinterpret_cast<const int8_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_INT8_FIELD(p, offset, value) \
+ (*reinterpret_cast<int8_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_UINT16_FIELD(p, offset) \
+ (*reinterpret_cast<const uint16_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_UINT16_FIELD(p, offset, value) \
+ (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_INT16_FIELD(p, offset) \
+ (*reinterpret_cast<const int16_t*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_INT16_FIELD(p, offset, value) \
+ (*reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value)
+
#define READ_UINT32_FIELD(p, offset) \
(*reinterpret_cast<const uint32_t*>(FIELD_ADDR_CONST(p, offset)))
@@ -1290,12 +1312,6 @@ MaybeHandle<Object> Object::GetProperty(Isolate* isolate, Handle<Object> object,
#define WRITE_INT64_FIELD(p, offset, value) \
(*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
-#define READ_SHORT_FIELD(p, offset) \
- (*reinterpret_cast<const uint16_t*>(FIELD_ADDR_CONST(p, offset)))
-
-#define WRITE_SHORT_FIELD(p, offset, value) \
- (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
-
#define READ_BYTE_FIELD(p, offset) \
(*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))
@@ -1316,31 +1332,6 @@ Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
}
-int Smi::value() const {
- return Internals::SmiValue(this);
-}
-
-
-Smi* Smi::FromInt(int value) {
- DCHECK(Smi::IsValid(value));
- return reinterpret_cast<Smi*>(Internals::IntToSmi(value));
-}
-
-
-Smi* Smi::FromIntptr(intptr_t value) {
- DCHECK(Smi::IsValid(value));
- int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
- return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
-}
-
-
-bool Smi::IsValid(intptr_t value) {
- bool result = Internals::IsValidSmi(value);
- DCHECK_EQ(result, value >= kMinValue && value <= kMaxValue);
- return result;
-}
-
-
MapWord MapWord::FromMap(const Map* map) {
return MapWord(reinterpret_cast<uintptr_t>(map));
}
@@ -1464,17 +1455,6 @@ void HeapObject::synchronized_set_map_word(MapWord map_word) {
}
-HeapObject* HeapObject::FromAddress(Address address) {
- DCHECK_TAG_ALIGNED(address);
- return reinterpret_cast<HeapObject*>(address + kHeapObjectTag);
-}
-
-
-Address HeapObject::address() {
- return reinterpret_cast<Address>(this) - kHeapObjectTag;
-}
-
-
int HeapObject::Size() {
return SizeFromMap(map());
}
@@ -1503,8 +1483,6 @@ HeapObjectContents HeapObject::ContentType() {
} else if (type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
type <= LAST_FIXED_TYPED_ARRAY_TYPE) {
return HeapObjectContents::kMixedValues;
- } else if (type == JS_ARRAY_BUFFER_TYPE) {
- return HeapObjectContents::kMixedValues;
} else if (type <= LAST_DATA_TYPE) {
// TODO(jochen): Why do we claim that Code and Map contain only raw values?
return HeapObjectContents::kRawValues;
@@ -1555,28 +1533,94 @@ int HeapNumber::get_sign() {
}
-float Float32x4::get_lane(int lane) const {
- DCHECK(lane < 4 && lane >= 0);
+bool Simd128Value::Equals(Simd128Value* that) {
+#define SIMD128_VALUE(TYPE, Type, type, lane_count, lane_type) \
+ if (this->Is##Type()) { \
+ if (!that->Is##Type()) return false; \
+ return Type::cast(this)->Equals(Type::cast(that)); \
+ }
+ SIMD128_TYPES(SIMD128_VALUE)
+#undef SIMD128_VALUE
+ return false;
+}
+
+
+#define SIMD128_VALUE_EQUALS(TYPE, Type, type, lane_count, lane_type) \
+ bool Type::Equals(Type* that) { \
+ for (int lane = 0; lane < lane_count; ++lane) { \
+ if (this->get_lane(lane) != that->get_lane(lane)) return false; \
+ } \
+ return true; \
+ }
+SIMD128_TYPES(SIMD128_VALUE_EQUALS)
+#undef SIMD128_VALUE_EQUALS
+
+
#if defined(V8_TARGET_LITTLE_ENDIAN)
- return READ_FLOAT_FIELD(this, kValueOffset + lane * kFloatSize);
+#define SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
+ lane_type value = \
+ READ_##field_type##_FIELD(this, kValueOffset + lane * field_size);
#elif defined(V8_TARGET_BIG_ENDIAN)
- return READ_FLOAT_FIELD(this, kValueOffset + (3 - lane) * kFloatSize);
+#define SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
+ lane_type value = READ_##field_type##_FIELD( \
+ this, kValueOffset + (lane_count - lane - 1) * field_size);
#else
#error Unknown byte ordering
#endif
-}
-
-void Float32x4::set_lane(int lane, float value) {
- DCHECK(lane < 4 && lane >= 0);
#if defined(V8_TARGET_LITTLE_ENDIAN)
- WRITE_FLOAT_FIELD(this, kValueOffset + lane * kFloatSize, value);
+#define SIMD128_WRITE_LANE(lane_count, field_type, field_size, value) \
+ WRITE_##field_type##_FIELD(this, kValueOffset + lane * field_size, value);
#elif defined(V8_TARGET_BIG_ENDIAN)
- WRITE_FLOAT_FIELD(this, kValueOffset + (3 - lane) * kFloatSize, value);
+#define SIMD128_WRITE_LANE(lane_count, field_type, field_size, value) \
+ WRITE_##field_type##_FIELD( \
+ this, kValueOffset + (lane_count - lane - 1) * field_size, value);
#else
#error Unknown byte ordering
#endif
-}
+
+#define SIMD128_NUMERIC_LANE_FNS(type, lane_type, lane_count, field_type, \
+ field_size) \
+ lane_type type::get_lane(int lane) const { \
+ DCHECK(lane < lane_count && lane >= 0); \
+ SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
+ return value; \
+ } \
+ \
+ void type::set_lane(int lane, lane_type value) { \
+ DCHECK(lane < lane_count && lane >= 0); \
+ SIMD128_WRITE_LANE(lane_count, field_type, field_size, value) \
+ }
+
+SIMD128_NUMERIC_LANE_FNS(Float32x4, float, 4, FLOAT, kFloatSize)
+SIMD128_NUMERIC_LANE_FNS(Int32x4, int32_t, 4, INT32, kInt32Size)
+SIMD128_NUMERIC_LANE_FNS(Int16x8, int16_t, 8, INT16, kShortSize)
+SIMD128_NUMERIC_LANE_FNS(Int8x16, int8_t, 16, INT8, kCharSize)
+#undef SIMD128_NUMERIC_LANE_FNS
+
+
+#define SIMD128_BOOLEAN_LANE_FNS(type, lane_type, lane_count, field_type, \
+ field_size) \
+ bool type::get_lane(int lane) const { \
+ DCHECK(lane < lane_count && lane >= 0); \
+ SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
+ DCHECK(value == 0 || value == -1); \
+ return value != 0; \
+ } \
+ \
+ void type::set_lane(int lane, bool value) { \
+ DCHECK(lane < lane_count && lane >= 0); \
+ int32_t int_val = value ? -1 : 0; \
+ SIMD128_WRITE_LANE(lane_count, field_type, field_size, int_val) \
+ }
+
+SIMD128_BOOLEAN_LANE_FNS(Bool32x4, int32_t, 4, INT32, kInt32Size)
+SIMD128_BOOLEAN_LANE_FNS(Bool16x8, int16_t, 8, INT16, kShortSize)
+SIMD128_BOOLEAN_LANE_FNS(Bool8x16, int8_t, 16, INT8, kCharSize)
+#undef SIMD128_BOOLEAN_LANE_FNS
+
+#undef SIMD128_READ_LANE
+#undef SIMD128_WRITE_LANE
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
@@ -1615,6 +1659,19 @@ void AllocationSite::Initialize() {
}
+bool AllocationSite::IsZombie() { return pretenure_decision() == kZombie; }
+
+
+bool AllocationSite::IsMaybeTenure() {
+ return pretenure_decision() == kMaybeTenure;
+}
+
+
+bool AllocationSite::PretenuringDecisionMade() {
+ return pretenure_decision() != kUndecided;
+}
+
+
void AllocationSite::MarkZombie() {
DCHECK(!IsZombie());
Initialize();
@@ -1622,6 +1679,41 @@ void AllocationSite::MarkZombie() {
}
+ElementsKind AllocationSite::GetElementsKind() {
+ DCHECK(!SitePointsToLiteral());
+ int value = Smi::cast(transition_info())->value();
+ return ElementsKindBits::decode(value);
+}
+
+
+void AllocationSite::SetElementsKind(ElementsKind kind) {
+ int value = Smi::cast(transition_info())->value();
+ set_transition_info(Smi::FromInt(ElementsKindBits::update(value, kind)),
+ SKIP_WRITE_BARRIER);
+}
+
+
+bool AllocationSite::CanInlineCall() {
+ int value = Smi::cast(transition_info())->value();
+ return DoNotInlineBit::decode(value) == 0;
+}
+
+
+void AllocationSite::SetDoNotInlineCall() {
+ int value = Smi::cast(transition_info())->value();
+ set_transition_info(Smi::FromInt(DoNotInlineBit::update(value, true)),
+ SKIP_WRITE_BARRIER);
+}
+
+
+bool AllocationSite::SitePointsToLiteral() {
+ // If transition_info is a smi, then it represents an ElementsKind
+ // for a constructed array. Otherwise, it must be a boilerplate
+ // for an object or array literal.
+ return transition_info()->IsJSArray() || transition_info()->IsJSObject();
+}
+
+
// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
@@ -1657,6 +1749,39 @@ inline bool AllocationSite::CanTrack(InstanceType type) {
}
+AllocationSite::PretenureDecision AllocationSite::pretenure_decision() {
+ int value = pretenure_data()->value();
+ return PretenureDecisionBits::decode(value);
+}
+
+
+void AllocationSite::set_pretenure_decision(PretenureDecision decision) {
+ int value = pretenure_data()->value();
+ set_pretenure_data(
+ Smi::FromInt(PretenureDecisionBits::update(value, decision)),
+ SKIP_WRITE_BARRIER);
+}
+
+
+bool AllocationSite::deopt_dependent_code() {
+ int value = pretenure_data()->value();
+ return DeoptDependentCodeBit::decode(value);
+}
+
+
+void AllocationSite::set_deopt_dependent_code(bool deopt) {
+ int value = pretenure_data()->value();
+ set_pretenure_data(Smi::FromInt(DeoptDependentCodeBit::update(value, deopt)),
+ SKIP_WRITE_BARRIER);
+}
+
+
+int AllocationSite::memento_found_count() {
+ int value = pretenure_data()->value();
+ return MementoFoundCountBits::decode(value);
+}
+
+
inline void AllocationSite::set_memento_found_count(int count) {
int value = pretenure_data()->value();
// Verify that we can count more mementos than we can possibly find in one
@@ -1670,6 +1795,17 @@ inline void AllocationSite::set_memento_found_count(int count) {
SKIP_WRITE_BARRIER);
}
+
+int AllocationSite::memento_create_count() {
+ return pretenure_create_count()->value();
+}
+
+
+void AllocationSite::set_memento_create_count(int count) {
+ set_pretenure_create_count(Smi::FromInt(count), SKIP_WRITE_BARRIER);
+}
+
+
inline bool AllocationSite::IncrementMementoFoundCount() {
if (IsZombie()) return false;
@@ -1743,6 +1879,18 @@ inline bool AllocationSite::DigestPretenuringFeedback(
}
+bool AllocationMemento::IsValid() {
+ return allocation_site()->IsAllocationSite() &&
+ !AllocationSite::cast(allocation_site())->IsZombie();
+}
+
+
+AllocationSite* AllocationMemento::GetAllocationSite() {
+ DCHECK(IsValid());
+ return AllocationSite::cast(allocation_site());
+}
+
+
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
JSObject::ValidateElements(object);
ElementsKind elements_kind = object->map()->elements_kind();
@@ -1832,12 +1980,6 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
}
-bool JSObject::WouldConvertToSlowElements(Handle<Object> key) {
- uint32_t index = 0;
- return key->ToArrayIndex(&index) && WouldConvertToSlowElements(index);
-}
-
-
void JSObject::SetMapAndElements(Handle<JSObject> object,
Handle<Map> new_map,
Handle<FixedArrayBase> value) {
@@ -1873,6 +2015,7 @@ void JSObject::initialize_elements() {
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
+ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
byte Oddball::kind() const {
@@ -1890,6 +2033,17 @@ ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(PropertyCell, property_details_raw, Object, kDetailsOffset)
ACCESSORS(PropertyCell, value, Object, kValueOffset)
+
+PropertyDetails PropertyCell::property_details() {
+ return PropertyDetails(Smi::cast(property_details_raw()));
+}
+
+
+void PropertyCell::set_property_details(PropertyDetails details) {
+ set_property_details_raw(details.AsSmi());
+}
+
+
Object* WeakCell::value() const { return READ_FIELD(this, kValueOffset); }
@@ -1994,7 +2148,7 @@ int JSObject::GetInternalFieldCount() {
// Make sure to adjust for the number of in-object properties. These
// properties do contribute to the size, but are not internal fields.
return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) -
- map()->inobject_properties();
+ map()->GetInObjectProperties();
}
@@ -2150,7 +2304,8 @@ void JSObject::InitializeBody(Map* map,
int size = map->instance_size();
int offset = kHeaderSize;
if (filler_value != pre_allocated_value) {
- int pre_allocated = map->pre_allocated_property_fields();
+ int pre_allocated =
+ map->GetInObjectProperties() - map->unused_property_fields();
DCHECK(pre_allocated * kPointerSize + kHeaderSize <= size);
for (int i = 0; i < pre_allocated; i++) {
WRITE_FIELD(this, offset, pre_allocated_value);
@@ -2174,8 +2329,8 @@ bool Map::TooManyFastProperties(StoreFromKeyed store_mode) {
if (unused_property_fields() != 0) return false;
if (is_prototype_map()) return false;
int minimum = store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ? 128 : 12;
- int limit = Max(minimum, inobject_properties());
- int external = NumberOfFields() - inobject_properties();
+ int limit = Max(minimum, GetInObjectProperties());
+ int external = NumberOfFields() - GetInObjectProperties();
return external > limit;
}
@@ -2227,15 +2382,9 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
void Object::VerifyApiCallResultType() {
#if DEBUG
- if (!(IsSmi() ||
- IsString() ||
- IsSymbol() ||
- IsSpecObject() ||
- IsHeapNumber() ||
- IsUndefined() ||
- IsTrue() ||
- IsFalse() ||
- IsNull())) {
+ if (!(IsSmi() || IsString() || IsSymbol() || IsSpecObject() ||
+ IsHeapNumber() || IsSimd128Value() || IsUndefined() || IsTrue() ||
+ IsFalse() || IsNull())) {
FATAL("API call returned invalid object");
}
#endif // DEBUG
@@ -2426,7 +2575,7 @@ AllocationAlignment HeapObject::RequiredAlignment() {
return kDoubleAligned;
}
if (IsHeapNumber()) return kDoubleUnaligned;
- if (IsFloat32x4()) return kSimd128Unaligned;
+ if (IsSimd128Value()) return kSimd128Unaligned;
#endif // V8_HOST_ARCH_32_BIT
return kWordAligned;
}
@@ -2508,6 +2657,11 @@ Object** FixedArray::data_start() {
}
+Object** FixedArray::RawFieldOfElementAt(int index) {
+ return HeapObject::RawField(this, OffsetOfElementAt(index));
+}
+
+
bool DescriptorArray::IsEmpty() {
DCHECK(length() >= kFirstIndex ||
this == GetHeap()->empty_descriptor_array());
@@ -2515,12 +2669,75 @@ bool DescriptorArray::IsEmpty() {
}
+int DescriptorArray::number_of_descriptors() {
+ DCHECK(length() >= kFirstIndex || IsEmpty());
+ int len = length();
+ return len == 0 ? 0 : Smi::cast(get(kDescriptorLengthIndex))->value();
+}
+
+
+int DescriptorArray::number_of_descriptors_storage() {
+ int len = length();
+ return len == 0 ? 0 : (len - kFirstIndex) / kDescriptorSize;
+}
+
+
+int DescriptorArray::NumberOfSlackDescriptors() {
+ return number_of_descriptors_storage() - number_of_descriptors();
+}
+
+
void DescriptorArray::SetNumberOfDescriptors(int number_of_descriptors) {
WRITE_FIELD(
this, kDescriptorLengthOffset, Smi::FromInt(number_of_descriptors));
}
+inline int DescriptorArray::number_of_entries() {
+ return number_of_descriptors();
+}
+
+
+bool DescriptorArray::HasEnumCache() {
+ return !IsEmpty() && !get(kEnumCacheIndex)->IsSmi();
+}
+
+
+void DescriptorArray::CopyEnumCacheFrom(DescriptorArray* array) {
+ set(kEnumCacheIndex, array->get(kEnumCacheIndex));
+}
+
+
+FixedArray* DescriptorArray::GetEnumCache() {
+ DCHECK(HasEnumCache());
+ FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
+ return FixedArray::cast(bridge->get(kEnumCacheBridgeCacheIndex));
+}
+
+
+bool DescriptorArray::HasEnumIndicesCache() {
+ if (IsEmpty()) return false;
+ Object* object = get(kEnumCacheIndex);
+ if (object->IsSmi()) return false;
+ FixedArray* bridge = FixedArray::cast(object);
+ return !bridge->get(kEnumCacheBridgeIndicesCacheIndex)->IsSmi();
+}
+
+
+FixedArray* DescriptorArray::GetEnumIndicesCache() {
+ DCHECK(HasEnumIndicesCache());
+ FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
+ return FixedArray::cast(bridge->get(kEnumCacheBridgeIndicesCacheIndex));
+}
+
+
+Object** DescriptorArray::GetEnumCacheSlot() {
+ DCHECK(HasEnumCache());
+ return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
+ kEnumCacheOffset);
+}
+
+
// Perform a binary search in a fixed array. Low and high are entry indices. If
// there are three entries in this array it should be called with low=0 and
// high=2.
@@ -2656,15 +2873,42 @@ PropertyDetails Map::GetLastDescriptorDetails() {
}
+int Map::LastAdded() {
+ int number_of_own_descriptors = NumberOfOwnDescriptors();
+ DCHECK(number_of_own_descriptors > 0);
+ return number_of_own_descriptors - 1;
+}
+
+
+int Map::NumberOfOwnDescriptors() {
+ return NumberOfOwnDescriptorsBits::decode(bit_field3());
+}
+
+
+void Map::SetNumberOfOwnDescriptors(int number) {
+ DCHECK(number <= instance_descriptors()->number_of_descriptors());
+ set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
+}
+
+
+int Map::EnumLength() { return EnumLengthBits::decode(bit_field3()); }
+
+
+void Map::SetEnumLength(int length) {
+ if (length != kInvalidEnumCacheSentinel) {
+ DCHECK(length >= 0);
+ DCHECK(length == 0 || instance_descriptors()->HasEnumCache());
+ DCHECK(length <= NumberOfOwnDescriptors());
+ }
+ set_bit_field3(EnumLengthBits::update(bit_field3(), length));
+}
+
+
FixedArrayBase* Map::GetInitialElements() {
if (has_fast_smi_or_object_elements() ||
has_fast_double_elements()) {
DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
return GetHeap()->empty_fixed_array();
- } else if (has_external_array_elements()) {
- ExternalArray* empty_array = GetHeap()->EmptyExternalArrayForMap(this);
- DCHECK(!GetHeap()->InNewSpace(empty_array));
- return empty_array;
} else if (has_fixed_typed_array_elements()) {
FixedTypedArrayBase* empty_array =
GetHeap()->EmptyFixedTypedArrayForMap(this);
@@ -2867,6 +3111,47 @@ DescriptorArray::WhitenessWitness::~WhitenessWitness() {
}
+PropertyType DescriptorArray::Entry::type() { return descs_->GetType(index_); }
+
+
+Object* DescriptorArray::Entry::GetCallbackObject() {
+ return descs_->GetValue(index_);
+}
+
+
+int HashTableBase::NumberOfElements() {
+ return Smi::cast(get(kNumberOfElementsIndex))->value();
+}
+
+
+int HashTableBase::NumberOfDeletedElements() {
+ return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
+}
+
+
+int HashTableBase::Capacity() {
+ return Smi::cast(get(kCapacityIndex))->value();
+}
+
+
+void HashTableBase::ElementAdded() {
+ SetNumberOfElements(NumberOfElements() + 1);
+}
+
+
+void HashTableBase::ElementRemoved() {
+ SetNumberOfElements(NumberOfElements() - 1);
+ SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
+}
+
+
+void HashTableBase::ElementsRemoved(int n) {
+ SetNumberOfElements(NumberOfElements() - n);
+ SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
+}
+
+
+// static
int HashTableBase::ComputeCapacity(int at_least_space_for) {
const int kMinCapacity = 4;
int capacity = base::bits::RoundUpToPowerOfTwo32(at_least_space_for * 2);
@@ -2874,10 +3159,18 @@ int HashTableBase::ComputeCapacity(int at_least_space_for) {
}
-int HashTableBase::ComputeCapacityForSerialization(int at_least_space_for) {
- const int kMinCapacity = 1;
- int capacity = base::bits::RoundUpToPowerOfTwo32(at_least_space_for);
- return Max(capacity, kMinCapacity);
+bool HashTableBase::IsKey(Object* k) {
+ return !k->IsTheHole() && !k->IsUndefined();
+}
+
+
+void HashTableBase::SetNumberOfElements(int nof) {
+ set(kNumberOfElementsIndex, Smi::FromInt(nof));
+}
+
+
+void HashTableBase::SetNumberOfDeletedElements(int nod) {
+ set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
}
@@ -2942,7 +3235,11 @@ void SeededNumberDictionary::set_requires_slow_elements() {
CAST_ACCESSOR(AccessorInfo)
CAST_ACCESSOR(ArrayList)
+CAST_ACCESSOR(Bool16x8)
+CAST_ACCESSOR(Bool32x4)
+CAST_ACCESSOR(Bool8x16)
CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(BytecodeArray)
CAST_ACCESSOR(Cell)
CAST_ACCESSOR(Code)
CAST_ACCESSOR(CodeCacheHashTable)
@@ -2952,19 +3249,9 @@ CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DescriptorArray)
-CAST_ACCESSOR(ExternalArray)
CAST_ACCESSOR(ExternalOneByteString)
-CAST_ACCESSOR(ExternalFloat32Array)
-CAST_ACCESSOR(ExternalFloat64Array)
-CAST_ACCESSOR(ExternalInt16Array)
-CAST_ACCESSOR(ExternalInt32Array)
-CAST_ACCESSOR(ExternalInt8Array)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalTwoByteString)
-CAST_ACCESSOR(ExternalUint16Array)
-CAST_ACCESSOR(ExternalUint32Array)
-CAST_ACCESSOR(ExternalUint8Array)
-CAST_ACCESSOR(ExternalUint8ClampedArray)
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedArrayBase)
CAST_ACCESSOR(FixedDoubleArray)
@@ -2975,6 +3262,9 @@ CAST_ACCESSOR(GlobalDictionary)
CAST_ACCESSOR(GlobalObject)
CAST_ACCESSOR(HandlerTable)
CAST_ACCESSOR(HeapObject)
+CAST_ACCESSOR(Int16x8)
+CAST_ACCESSOR(Int32x4)
+CAST_ACCESSOR(Int8x16)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
@@ -2983,7 +3273,6 @@ CAST_ACCESSOR(JSDataView)
CAST_ACCESSOR(JSDate)
CAST_ACCESSOR(JSFunction)
CAST_ACCESSOR(JSFunctionProxy)
-CAST_ACCESSOR(JSFunctionResultCache)
CAST_ACCESSOR(JSGeneratorObject)
CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSGlobalProxy)
@@ -3019,6 +3308,7 @@ CAST_ACCESSOR(SeqOneByteString)
CAST_ACCESSOR(SeqString)
CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(SharedFunctionInfo)
+CAST_ACCESSOR(Simd128Value)
CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(Smi)
CAST_ACCESSOR(String)
@@ -3057,6 +3347,116 @@ FixedTypedArray<Traits>::cast(const Object* object) {
}
+#define DEFINE_DEOPT_ELEMENT_ACCESSORS(name, type) \
+ type* DeoptimizationInputData::name() { \
+ return type::cast(get(k##name##Index)); \
+ } \
+ void DeoptimizationInputData::Set##name(type* value) { \
+ set(k##name##Index, value); \
+ }
+
+DEFINE_DEOPT_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrAstId, Smi)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(WeakCellCache, Object)
+
+#undef DEFINE_DEOPT_ELEMENT_ACCESSORS
+
+
+#define DEFINE_DEOPT_ENTRY_ACCESSORS(name, type) \
+ type* DeoptimizationInputData::name(int i) { \
+ return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
+ } \
+ void DeoptimizationInputData::Set##name(int i, type* value) { \
+ set(IndexForEntry(i) + k##name##Offset, value); \
+ }
+
+DEFINE_DEOPT_ENTRY_ACCESSORS(AstIdRaw, Smi)
+DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
+DEFINE_DEOPT_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
+DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
+
+#undef DEFINE_DEOPT_ENTRY_ACCESSORS
+
+
+BailoutId DeoptimizationInputData::AstId(int i) {
+ return BailoutId(AstIdRaw(i)->value());
+}
+
+
+void DeoptimizationInputData::SetAstId(int i, BailoutId value) {
+ SetAstIdRaw(i, Smi::FromInt(value.ToInt()));
+}
+
+
+int DeoptimizationInputData::DeoptCount() {
+ return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
+}
+
+
+int DeoptimizationOutputData::DeoptPoints() { return length() / 2; }
+
+
+BailoutId DeoptimizationOutputData::AstId(int index) {
+ return BailoutId(Smi::cast(get(index * 2))->value());
+}
+
+
+void DeoptimizationOutputData::SetAstId(int index, BailoutId id) {
+ set(index * 2, Smi::FromInt(id.ToInt()));
+}
+
+
+Smi* DeoptimizationOutputData::PcAndState(int index) {
+ return Smi::cast(get(1 + index * 2));
+}
+
+
+void DeoptimizationOutputData::SetPcAndState(int index, Smi* offset) {
+ set(1 + index * 2, offset);
+}
+
+
+void HandlerTable::SetRangeStart(int index, int value) {
+ set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
+}
+
+
+void HandlerTable::SetRangeEnd(int index, int value) {
+ set(index * kRangeEntrySize + kRangeEndIndex, Smi::FromInt(value));
+}
+
+
+void HandlerTable::SetRangeHandler(int index, int offset,
+ CatchPrediction prediction) {
+ int value = HandlerOffsetField::encode(offset) |
+ HandlerPredictionField::encode(prediction);
+ set(index * kRangeEntrySize + kRangeHandlerIndex, Smi::FromInt(value));
+}
+
+
+void HandlerTable::SetRangeDepth(int index, int value) {
+ set(index * kRangeEntrySize + kRangeDepthIndex, Smi::FromInt(value));
+}
+
+
+void HandlerTable::SetReturnOffset(int index, int value) {
+ set(index * kReturnEntrySize + kReturnOffsetIndex, Smi::FromInt(value));
+}
+
+
+void HandlerTable::SetReturnHandler(int index, int offset,
+ CatchPrediction prediction) {
+ int value = HandlerOffsetField::encode(offset) |
+ HandlerPredictionField::encode(prediction);
+ set(index * kReturnEntrySize + kReturnHandlerIndex, Smi::FromInt(value));
+}
+
+
#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
STRUCT_LIST(MAKE_STRUCT_CAST)
#undef MAKE_STRUCT_CAST
@@ -3088,6 +3488,9 @@ SMI_ACCESSORS(String, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)
+int FreeSpace::Size() { return size(); }
+
+
FreeSpace* FreeSpace::next() {
DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
(!GetHeap()->deserialization_complete() && map() == NULL));
@@ -3354,13 +3757,13 @@ uc16* SeqTwoByteString::GetChars() {
uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
DCHECK(index >= 0 && index < length());
- return READ_SHORT_FIELD(this, kHeaderSize + index * kShortSize);
+ return READ_UINT16_FIELD(this, kHeaderSize + index * kShortSize);
}
void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
DCHECK(index >= 0 && index < length());
- WRITE_SHORT_FIELD(this, kHeaderSize + index * kShortSize, value);
+ WRITE_UINT16_FIELD(this, kHeaderSize + index * kShortSize, value);
}
@@ -3579,40 +3982,7 @@ void StringCharacterStream::VisitTwoByteString(
}
-void JSFunctionResultCache::MakeZeroSize() {
- set_finger_index(kEntriesIndex);
- set_size(kEntriesIndex);
-}
-
-
-void JSFunctionResultCache::Clear() {
- int cache_size = size();
- Object** entries_start = RawFieldOfElementAt(kEntriesIndex);
- MemsetPointer(entries_start,
- GetHeap()->the_hole_value(),
- cache_size - kEntriesIndex);
- MakeZeroSize();
-}
-
-
-int JSFunctionResultCache::size() {
- return Smi::cast(get(kCacheSizeIndex))->value();
-}
-
-
-void JSFunctionResultCache::set_size(int size) {
- set(kCacheSizeIndex, Smi::FromInt(size));
-}
-
-
-int JSFunctionResultCache::finger_index() {
- return Smi::cast(get(kFingerIndex))->value();
-}
-
-
-void JSFunctionResultCache::set_finger_index(int finger_index) {
- set(kFingerIndex, Smi::FromInt(finger_index));
-}
+int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
byte ByteArray::get(int index) {
@@ -3639,221 +4009,66 @@ ByteArray* ByteArray::FromDataStartAddress(Address address) {
}
-Address ByteArray::GetDataStartAddress() {
- return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
-}
-
-
-uint8_t* ExternalUint8ClampedArray::external_uint8_clamped_pointer() {
- return reinterpret_cast<uint8_t*>(external_pointer());
-}
-
-
-uint8_t ExternalUint8ClampedArray::get_scalar(int index) {
- DCHECK((index >= 0) && (index < this->length()));
- uint8_t* ptr = external_uint8_clamped_pointer();
- return ptr[index];
-}
-
-
-Handle<Object> ExternalUint8ClampedArray::get(
- Handle<ExternalUint8ClampedArray> array,
- int index) {
- return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
- array->GetIsolate());
-}
-
-
-void ExternalUint8ClampedArray::set(int index, uint8_t value) {
- DCHECK((index >= 0) && (index < this->length()));
- uint8_t* ptr = external_uint8_clamped_pointer();
- ptr[index] = value;
-}
-
-
-void* ExternalArray::external_pointer() const {
- intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
- return reinterpret_cast<void*>(ptr);
-}
-
-
-void ExternalArray::set_external_pointer(void* value, WriteBarrierMode mode) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
-}
-
-
-int8_t ExternalInt8Array::get_scalar(int index) {
- DCHECK((index >= 0) && (index < this->length()));
- int8_t* ptr = static_cast<int8_t*>(external_pointer());
- return ptr[index];
-}
-
-
-Handle<Object> ExternalInt8Array::get(Handle<ExternalInt8Array> array,
- int index) {
- return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
- array->GetIsolate());
-}
-
-
-void ExternalInt8Array::set(int index, int8_t value) {
- DCHECK((index >= 0) && (index < this->length()));
- int8_t* ptr = static_cast<int8_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-uint8_t ExternalUint8Array::get_scalar(int index) {
- DCHECK((index >= 0) && (index < this->length()));
- uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
- return ptr[index];
-}
-
-
-Handle<Object> ExternalUint8Array::get(Handle<ExternalUint8Array> array,
- int index) {
- return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
- array->GetIsolate());
-}
-
-
-void ExternalUint8Array::set(int index, uint8_t value) {
- DCHECK((index >= 0) && (index < this->length()));
- uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-int16_t ExternalInt16Array::get_scalar(int index) {
- DCHECK((index >= 0) && (index < this->length()));
- int16_t* ptr = static_cast<int16_t*>(external_pointer());
- return ptr[index];
-}
-
-
-Handle<Object> ExternalInt16Array::get(Handle<ExternalInt16Array> array,
- int index) {
- return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
- array->GetIsolate());
-}
-
-
-void ExternalInt16Array::set(int index, int16_t value) {
- DCHECK((index >= 0) && (index < this->length()));
- int16_t* ptr = static_cast<int16_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-uint16_t ExternalUint16Array::get_scalar(int index) {
- DCHECK((index >= 0) && (index < this->length()));
- uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
- return ptr[index];
-}
-
-
-Handle<Object> ExternalUint16Array::get(Handle<ExternalUint16Array> array,
- int index) {
- return Handle<Smi>(Smi::FromInt(array->get_scalar(index)),
- array->GetIsolate());
-}
-
-
-void ExternalUint16Array::set(int index, uint16_t value) {
- DCHECK((index >= 0) && (index < this->length()));
- uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
- ptr[index] = value;
-}
-
+int ByteArray::ByteArraySize() { return SizeFor(this->length()); }
-int32_t ExternalInt32Array::get_scalar(int index) {
- DCHECK((index >= 0) && (index < this->length()));
- int32_t* ptr = static_cast<int32_t*>(external_pointer());
- return ptr[index];
-}
-
-Handle<Object> ExternalInt32Array::get(Handle<ExternalInt32Array> array,
- int index) {
- return array->GetIsolate()->factory()->
- NewNumberFromInt(array->get_scalar(index));
-}
-
-
-void ExternalInt32Array::set(int index, int32_t value) {
- DCHECK((index >= 0) && (index < this->length()));
- int32_t* ptr = static_cast<int32_t*>(external_pointer());
- ptr[index] = value;
+Address ByteArray::GetDataStartAddress() {
+ return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
}
-uint32_t ExternalUint32Array::get_scalar(int index) {
- DCHECK((index >= 0) && (index < this->length()));
- uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
- return ptr[index];
+byte BytecodeArray::get(int index) {
+ DCHECK(index >= 0 && index < this->length());
+ return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}
-Handle<Object> ExternalUint32Array::get(Handle<ExternalUint32Array> array,
- int index) {
- return array->GetIsolate()->factory()->
- NewNumberFromUint(array->get_scalar(index));
+void BytecodeArray::set(int index, byte value) {
+ DCHECK(index >= 0 && index < this->length());
+ WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
}
-void ExternalUint32Array::set(int index, uint32_t value) {
- DCHECK((index >= 0) && (index < this->length()));
- uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
- ptr[index] = value;
+void BytecodeArray::set_frame_size(int frame_size) {
+ DCHECK_GE(frame_size, 0);
+ DCHECK(IsAligned(frame_size, static_cast<unsigned>(kPointerSize)));
+ WRITE_INT_FIELD(this, kFrameSizeOffset, frame_size);
}
-float ExternalFloat32Array::get_scalar(int index) {
- DCHECK((index >= 0) && (index < this->length()));
- float* ptr = static_cast<float*>(external_pointer());
- return ptr[index];
+int BytecodeArray::frame_size() const {
+ return READ_INT_FIELD(this, kFrameSizeOffset);
}
-Handle<Object> ExternalFloat32Array::get(Handle<ExternalFloat32Array> array,
- int index) {
- return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index));
+Address BytecodeArray::GetFirstBytecodeAddress() {
+ return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
}
-void ExternalFloat32Array::set(int index, float value) {
- DCHECK((index >= 0) && (index < this->length()));
- float* ptr = static_cast<float*>(external_pointer());
- ptr[index] = value;
-}
+int BytecodeArray::BytecodeArraySize() { return SizeFor(this->length()); }
-double ExternalFloat64Array::get_scalar(int index) {
- DCHECK((index >= 0) && (index < this->length()));
- double* ptr = static_cast<double*>(external_pointer());
- return ptr[index];
-}
+ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
-Handle<Object> ExternalFloat64Array::get(Handle<ExternalFloat64Array> array,
- int index) {
- return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index));
+void* FixedTypedArrayBase::external_pointer() const {
+ intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
+ return reinterpret_cast<void*>(ptr);
}
-void ExternalFloat64Array::set(int index, double value) {
- DCHECK((index >= 0) && (index < this->length()));
- double* ptr = static_cast<double*>(external_pointer());
- ptr[index] = value;
+void FixedTypedArrayBase::set_external_pointer(void* value,
+ WriteBarrierMode mode) {
+ intptr_t ptr = reinterpret_cast<intptr_t>(value);
+ WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
}
-ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
-
-
void* FixedTypedArrayBase::DataPtr() {
- return FIELD_ADDR(this, kDataOffset);
+ return reinterpret_cast<void*>(
+ reinterpret_cast<intptr_t>(base_pointer()) +
+ reinterpret_cast<intptr_t>(external_pointer()));
}
@@ -3876,6 +4091,7 @@ int FixedTypedArrayBase::ElementSize(InstanceType type) {
int FixedTypedArrayBase::DataSize(InstanceType type) {
+ if (base_pointer() == Smi::FromInt(0)) return 0;
return length() * ElementSize(type);
}
@@ -3934,8 +4150,7 @@ double Float64ArrayTraits::defaultValue() {
template <class Traits>
typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
DCHECK((index >= 0) && (index < this->length()));
- ElementType* ptr = reinterpret_cast<ElementType*>(
- FIELD_ADDR(this, kDataOffset));
+ ElementType* ptr = reinterpret_cast<ElementType*>(DataPtr());
return ptr[index];
}
@@ -3943,8 +4158,7 @@ typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
template <class Traits>
void FixedTypedArray<Traits>::set(int index, ElementType value) {
DCHECK((index >= 0) && (index < this->length()));
- ElementType* ptr = reinterpret_cast<ElementType*>(
- FIELD_ADDR(this, kDataOffset));
+ ElementType* ptr = reinterpret_cast<ElementType*>(DataPtr());
ptr[index] = value;
}
@@ -4080,19 +4294,46 @@ int Map::instance_size() {
}
-int Map::inobject_properties() {
- return READ_BYTE_FIELD(this, kInObjectPropertiesOffset);
+int Map::inobject_properties_or_constructor_function_index() {
+ return READ_BYTE_FIELD(this,
+ kInObjectPropertiesOrConstructorFunctionIndexOffset);
+}
+
+
+void Map::set_inobject_properties_or_constructor_function_index(int value) {
+ DCHECK(0 <= value && value < 256);
+ WRITE_BYTE_FIELD(this, kInObjectPropertiesOrConstructorFunctionIndexOffset,
+ static_cast<byte>(value));
}
-int Map::pre_allocated_property_fields() {
- return READ_BYTE_FIELD(this, kPreAllocatedPropertyFieldsOffset);
+int Map::GetInObjectProperties() {
+ DCHECK(IsJSObjectMap());
+ return inobject_properties_or_constructor_function_index();
+}
+
+
+void Map::SetInObjectProperties(int value) {
+ DCHECK(IsJSObjectMap());
+ set_inobject_properties_or_constructor_function_index(value);
+}
+
+
+int Map::GetConstructorFunctionIndex() {
+ DCHECK(IsPrimitiveMap());
+ return inobject_properties_or_constructor_function_index();
+}
+
+
+void Map::SetConstructorFunctionIndex(int value) {
+ DCHECK(IsPrimitiveMap());
+ set_inobject_properties_or_constructor_function_index(value);
}
int Map::GetInObjectPropertyOffset(int index) {
// Adjust for the number of properties stored in the object.
- index -= inobject_properties();
+ index -= GetInObjectProperties();
DCHECK(index <= 0);
return instance_size() + (index * kPointerSize);
}
@@ -4124,6 +4365,9 @@ int HeapObject::SizeFromMap(Map* map) {
if (instance_type == BYTE_ARRAY_TYPE) {
return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
}
+ if (instance_type == BYTECODE_ARRAY_TYPE) {
+ return reinterpret_cast<BytecodeArray*>(this)->BytecodeArraySize();
+ }
if (instance_type == FREE_SPACE_TYPE) {
return reinterpret_cast<FreeSpace*>(this)->nobarrier_size();
}
@@ -4157,18 +4401,7 @@ void Map::set_instance_size(int value) {
}
-void Map::set_inobject_properties(int value) {
- DCHECK(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this, kInObjectPropertiesOffset, static_cast<byte>(value));
-}
-
-
-void Map::set_pre_allocated_property_fields(int value) {
- DCHECK(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this,
- kPreAllocatedPropertyFieldsOffset,
- static_cast<byte>(value));
-}
+void Map::clear_unused() { WRITE_BYTE_FIELD(this, kUnusedOffset, 0); }
InstanceType Map::instance_type() {
@@ -4231,6 +4464,51 @@ bool Map::function_with_prototype() {
}
+void Map::set_is_hidden_prototype() {
+ set_bit_field(bit_field() | (1 << kIsHiddenPrototype));
+}
+
+
+bool Map::is_hidden_prototype() {
+ return ((1 << kIsHiddenPrototype) & bit_field()) != 0;
+}
+
+
+void Map::set_has_indexed_interceptor() {
+ set_bit_field(bit_field() | (1 << kHasIndexedInterceptor));
+}
+
+
+bool Map::has_indexed_interceptor() {
+ return ((1 << kHasIndexedInterceptor) & bit_field()) != 0;
+}
+
+
+void Map::set_is_undetectable() {
+ set_bit_field(bit_field() | (1 << kIsUndetectable));
+}
+
+
+bool Map::is_undetectable() {
+ return ((1 << kIsUndetectable) & bit_field()) != 0;
+}
+
+
+void Map::set_is_observed() { set_bit_field(bit_field() | (1 << kIsObserved)); }
+
+bool Map::is_observed() { return ((1 << kIsObserved) & bit_field()) != 0; }
+
+
+void Map::set_has_named_interceptor() {
+ set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
+}
+
+
+bool Map::has_named_interceptor() {
+ return ((1 << kHasNamedInterceptor) & bit_field()) != 0;
+}
+
+
void Map::set_is_access_check_needed(bool access_check_needed) {
if (access_check_needed) {
set_bit_field(bit_field() | (1 << kIsAccessCheckNeeded));
@@ -4267,6 +4545,50 @@ bool Map::is_prototype_map() const {
}
+void Map::set_elements_kind(ElementsKind elements_kind) {
+ DCHECK(static_cast<int>(elements_kind) < kElementsKindCount);
+ DCHECK(kElementsKindCount <= (1 << Map::ElementsKindBits::kSize));
+ set_bit_field2(Map::ElementsKindBits::update(bit_field2(), elements_kind));
+ DCHECK(this->elements_kind() == elements_kind);
+}
+
+
+ElementsKind Map::elements_kind() {
+ return Map::ElementsKindBits::decode(bit_field2());
+}
+
+
+bool Map::has_fast_smi_elements() {
+ return IsFastSmiElementsKind(elements_kind());
+}
+
+bool Map::has_fast_object_elements() {
+ return IsFastObjectElementsKind(elements_kind());
+}
+
+bool Map::has_fast_smi_or_object_elements() {
+ return IsFastSmiOrObjectElementsKind(elements_kind());
+}
+
+bool Map::has_fast_double_elements() {
+ return IsFastDoubleElementsKind(elements_kind());
+}
+
+bool Map::has_fast_elements() { return IsFastElementsKind(elements_kind()); }
+
+bool Map::has_sloppy_arguments_elements() {
+ return IsSloppyArgumentsElements(elements_kind());
+}
+
+bool Map::has_fixed_typed_array_elements() {
+ return IsFixedTypedArrayElementsKind(elements_kind());
+}
+
+bool Map::has_dictionary_elements() {
+ return IsDictionaryElementsKind(elements_kind());
+}
+
+
void Map::set_dictionary_map(bool value) {
uint32_t new_bit_field3 = DictionaryMap::update(bit_field3(), value);
new_bit_field3 = IsUnstable::update(new_bit_field3, value);
@@ -4381,6 +4703,39 @@ void Map::NotifyLeafMapLayoutChange() {
}
+bool Map::CanTransition() {
+ // Only JSObject and subtypes have map transitions and back pointers.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
+ return instance_type() >= FIRST_JS_OBJECT_TYPE;
+}
+
+
+bool Map::IsPrimitiveMap() {
+ STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
+ return instance_type() <= LAST_PRIMITIVE_TYPE;
+}
+bool Map::IsJSObjectMap() {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ return instance_type() >= FIRST_JS_OBJECT_TYPE;
+}
+bool Map::IsJSArrayMap() { return instance_type() == JS_ARRAY_TYPE; }
+bool Map::IsStringMap() { return instance_type() < FIRST_NONSTRING_TYPE; }
+bool Map::IsJSProxyMap() {
+ InstanceType type = instance_type();
+ return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
+}
+bool Map::IsJSGlobalProxyMap() {
+ return instance_type() == JS_GLOBAL_PROXY_TYPE;
+}
+bool Map::IsJSGlobalObjectMap() {
+ return instance_type() == JS_GLOBAL_OBJECT_TYPE;
+}
+bool Map::IsGlobalObjectMap() {
+ const InstanceType type = instance_type();
+ return type == JS_GLOBAL_OBJECT_TYPE || type == JS_BUILTINS_OBJECT_TYPE;
+}
+
+
bool Map::CanOmitMapChecks() {
return is_stable() && FLAG_omit_map_checks_for_leaf_maps;
}
@@ -4447,6 +4802,16 @@ bool Code::IsCodeStubOrIC() {
}
+bool Code::IsJavaScriptCode() {
+ if (kind() == FUNCTION || kind() == OPTIMIZED_FUNCTION) {
+ return true;
+ }
+ Handle<Code> interpreter_entry =
+ GetIsolate()->builtins()->InterpreterEntryTrampoline();
+ return interpreter_entry.location() != nullptr && *interpreter_entry == this;
+}
+
+
InlineCacheState Code::ic_state() {
InlineCacheState result = ExtractICStateFromFlags(flags());
// Only allow uninitialized or debugger states for non-IC code
@@ -4529,61 +4894,46 @@ inline void Code::set_can_have_weak_objects(bool value) {
bool Code::has_deoptimization_support() {
DCHECK_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+ unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
return FullCodeFlagsHasDeoptimizationSupportField::decode(flags);
}
void Code::set_has_deoptimization_support(bool value) {
DCHECK_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+ unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
flags = FullCodeFlagsHasDeoptimizationSupportField::update(flags, value);
- WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
+ WRITE_UINT32_FIELD(this, kFullCodeFlags, flags);
}
bool Code::has_debug_break_slots() {
DCHECK_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+ unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
return FullCodeFlagsHasDebugBreakSlotsField::decode(flags);
}
void Code::set_has_debug_break_slots(bool value) {
DCHECK_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+ unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
flags = FullCodeFlagsHasDebugBreakSlotsField::update(flags, value);
- WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
-}
-
-
-bool Code::is_compiled_optimizable() {
- DCHECK_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
- return FullCodeFlagsIsCompiledOptimizable::decode(flags);
-}
-
-
-void Code::set_compiled_optimizable(bool value) {
- DCHECK_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
- flags = FullCodeFlagsIsCompiledOptimizable::update(flags, value);
- WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
+ WRITE_UINT32_FIELD(this, kFullCodeFlags, flags);
}
bool Code::has_reloc_info_for_serialization() {
DCHECK_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+ unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
return FullCodeFlagsHasRelocInfoForSerialization::decode(flags);
}
void Code::set_has_reloc_info_for_serialization(bool value) {
DCHECK_EQ(FUNCTION, kind());
- byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+ unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
flags = FullCodeFlagsHasRelocInfoForSerialization::update(flags, value);
- WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
+ WRITE_UINT32_FIELD(this, kFullCodeFlags, flags);
}
@@ -4605,14 +4955,16 @@ void Code::set_allow_osr_at_loop_nesting_level(int level) {
int Code::profiler_ticks() {
DCHECK_EQ(FUNCTION, kind());
- return READ_BYTE_FIELD(this, kProfilerTicksOffset);
+ return ProfilerTicksField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_profiler_ticks(int ticks) {
- DCHECK(ticks < 256);
if (kind() == FUNCTION) {
- WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks);
+ unsigned previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ unsigned updated = ProfilerTicksField::update(previous, ticks);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
}
@@ -4733,8 +5085,25 @@ bool Code::is_keyed_stub() {
}
-bool Code::is_debug_stub() {
- return ic_state() == DEBUG_STUB;
+bool Code::is_debug_stub() { return ic_state() == DEBUG_STUB; }
+bool Code::is_handler() { return kind() == HANDLER; }
+bool Code::is_load_stub() { return kind() == LOAD_IC; }
+bool Code::is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
+bool Code::is_store_stub() { return kind() == STORE_IC; }
+bool Code::is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
+bool Code::is_call_stub() { return kind() == CALL_IC; }
+bool Code::is_binary_op_stub() { return kind() == BINARY_OP_IC; }
+bool Code::is_compare_ic_stub() { return kind() == COMPARE_IC; }
+bool Code::is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; }
+bool Code::is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
+bool Code::is_optimized_code() { return kind() == OPTIMIZED_FUNCTION; }
+
+
+bool Code::embeds_maps_weakly() {
+ Kind k = kind();
+ return (k == LOAD_IC || k == STORE_IC || k == KEYED_LOAD_IC ||
+ k == KEYED_STORE_IC || k == COMPARE_NIL_IC) &&
+ ic_state() == MONOMORPHIC;
}
@@ -4831,6 +5200,18 @@ Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
}
+bool Code::CanContainWeakObjects() {
+ // is_turbofanned() implies !can_have_weak_objects().
+ DCHECK(!is_optimized_code() || !is_turbofanned() || !can_have_weak_objects());
+ return is_optimized_code() && can_have_weak_objects();
+}
+
+
+bool Code::IsWeakObject(Object* object) {
+ return (CanContainWeakObjects() && IsWeakObjectInOptimizedCode(object));
+}
+
+
bool Code::IsWeakObjectInOptimizedCode(Object* object) {
if (object->IsMap()) {
return Map::cast(object)->CanTransition() &&
@@ -4841,7 +5222,8 @@ bool Code::IsWeakObjectInOptimizedCode(Object* object) {
} else if (object->IsPropertyCell()) {
object = PropertyCell::cast(object)->value();
}
- if (object->IsJSObject()) {
+ if (object->IsJSObject() || object->IsJSProxy()) {
+ // JSProxy is handled like JSObject because it can morph into one.
return FLAG_weak_embedded_objects_in_optimized_code;
}
if (object->IsFixedArray()) {
@@ -5179,8 +5561,7 @@ void Script::set_origin_options(ScriptOriginOptions origin_options) {
ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
-ACCESSORS(DebugInfo, original_code, Code, kOriginalCodeIndex)
-ACCESSORS(DebugInfo, code, Code, kPatchedCodeIndex)
+ACCESSORS(DebugInfo, code, Code, kCodeIndex)
ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
ACCESSORS_TO_SMI(BreakPointInfo, code_position, kCodePositionIndex)
@@ -5455,6 +5836,9 @@ void SharedFunctionInfo::ReplaceCode(Code* value) {
}
DCHECK(code()->gc_metadata() == NULL && value->gc_metadata() == NULL);
+#ifdef DEBUG
+ Code::VerifyRecompiledCode(code(), value);
+#endif // DEBUG
set_code(value);
@@ -5486,8 +5870,26 @@ bool SharedFunctionInfo::is_compiled() {
}
-bool SharedFunctionInfo::is_simple_parameter_list() {
- return scope_info()->IsSimpleParameterList();
+bool SharedFunctionInfo::has_simple_parameters() {
+ return scope_info()->HasSimpleParameters();
+}
+
+
+bool SharedFunctionInfo::HasDebugInfo() {
+ bool has_debug_info = debug_info()->IsStruct();
+ DCHECK(!has_debug_info || HasDebugCode());
+ return has_debug_info;
+}
+
+
+DebugInfo* SharedFunctionInfo::GetDebugInfo() {
+ DCHECK(HasDebugInfo());
+ return DebugInfo::cast(debug_info());
+}
+
+
+bool SharedFunctionInfo::HasDebugCode() {
+ return code()->kind() == Code::FUNCTION && code()->has_debug_break_slots();
}
@@ -5513,6 +5915,17 @@ BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
}
+bool SharedFunctionInfo::HasBytecodeArray() {
+ return function_data()->IsBytecodeArray();
+}
+
+
+BytecodeArray* SharedFunctionInfo::bytecode_array() {
+ DCHECK(HasBytecodeArray());
+ return BytecodeArray::cast(function_data());
+}
+
+
int SharedFunctionInfo::ic_age() {
return ICAgeBits::decode(counters());
}
@@ -5587,29 +6000,28 @@ void SharedFunctionInfo::TryReenableOptimization() {
}
-bool JSFunction::IsBuiltin() {
- return context()->global_object()->IsJSBuiltinsObject();
+void SharedFunctionInfo::set_disable_optimization_reason(BailoutReason reason) {
+ set_opt_count_and_bailout_reason(DisabledOptimizationReasonBits::update(
+ opt_count_and_bailout_reason(), reason));
}
-bool JSFunction::IsFromNativeScript() {
- Object* script = shared()->script();
- bool native = script->IsScript() &&
- Script::cast(script)->type()->value() == Script::TYPE_NATIVE;
- DCHECK(!IsBuiltin() || native); // All builtins are also native.
- return native;
+bool SharedFunctionInfo::IsSubjectToDebugging() {
+ Object* script_obj = script();
+ if (script_obj->IsUndefined()) return false;
+ Script* script = Script::cast(script_obj);
+ Script::Type type = static_cast<Script::Type>(script->type()->value());
+ return type == Script::TYPE_NORMAL;
}
-bool JSFunction::IsFromExtensionScript() {
- Object* script = shared()->script();
- return script->IsScript() &&
- Script::cast(script)->type()->value() == Script::TYPE_EXTENSION;
+bool JSFunction::IsBuiltin() {
+ return context()->global_object()->IsJSBuiltinsObject();
}
bool JSFunction::IsSubjectToDebugging() {
- return !IsFromNativeScript() && !IsFromExtensionScript();
+ return shared()->IsSubjectToDebugging();
}
@@ -5771,8 +6183,8 @@ bool JSFunction::is_compiled() {
}
-bool JSFunction::is_simple_parameter_list() {
- return shared()->is_simple_parameter_list();
+bool JSFunction::has_simple_parameters() {
+ return shared()->has_simple_parameters();
}
@@ -6024,6 +6436,17 @@ bool Code::contains(byte* inner_pointer) {
}
+int Code::ExecutableSize() {
+ // Check that the assumptions about the layout of the code object holds.
+ DCHECK_EQ(static_cast<int>(instruction_start() - address()),
+ Code::kHeaderSize);
+ return instruction_size() + Code::kHeaderSize;
+}
+
+
+int Code::CodeSize() { return SizeFor(body_size()); }
+
+
ACCESSORS(JSArray, length, Object, kLengthOffset)
@@ -6093,32 +6516,6 @@ void JSArrayBuffer::set_is_shared(bool value) {
}
-// static
-template <typename StaticVisitor>
-void JSArrayBuffer::JSArrayBufferIterateBody(Heap* heap, HeapObject* obj) {
- StaticVisitor::VisitPointers(
- heap,
- HeapObject::RawField(obj, JSArrayBuffer::BodyDescriptor::kStartOffset),
- HeapObject::RawField(obj,
- JSArrayBuffer::kByteLengthOffset + kPointerSize));
- StaticVisitor::VisitPointers(
- heap, HeapObject::RawField(obj, JSArrayBuffer::kSize),
- HeapObject::RawField(obj, JSArrayBuffer::kSizeWithInternalFields));
-}
-
-
-void JSArrayBuffer::JSArrayBufferIterateBody(HeapObject* obj,
- ObjectVisitor* v) {
- v->VisitPointers(
- HeapObject::RawField(obj, JSArrayBuffer::BodyDescriptor::kStartOffset),
- HeapObject::RawField(obj,
- JSArrayBuffer::kByteLengthOffset + kPointerSize));
- v->VisitPointers(
- HeapObject::RawField(obj, JSArrayBuffer::kSize),
- HeapObject::RawField(obj, JSArrayBuffer::kSizeWithInternalFields));
-}
-
-
Object* JSArrayBufferView::byte_offset() const {
if (WasNeutered()) return Smi::FromInt(0);
return Object::cast(READ_FIELD(this, kByteOffsetOffset));
@@ -6311,27 +6708,6 @@ bool JSObject::HasSloppyArgumentsElements() {
}
-bool JSObject::HasExternalArrayElements() {
- HeapObject* array = elements();
- DCHECK(array != NULL);
- return array->IsExternalArray();
-}
-
-
-#define EXTERNAL_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \
-bool JSObject::HasExternal##Type##Elements() { \
- HeapObject* array = elements(); \
- DCHECK(array != NULL); \
- if (!array->IsHeapObject()) \
- return false; \
- return array->map()->instance_type() == EXTERNAL_##TYPE##_ARRAY_TYPE; \
-}
-
-TYPED_ARRAYS(EXTERNAL_ELEMENTS_CHECK)
-
-#undef EXTERNAL_ELEMENTS_CHECK
-
-
bool JSObject::HasFixedTypedArrayElements() {
HeapObject* array = elements();
DCHECK(array != NULL);
@@ -6525,6 +6901,10 @@ uint32_t StringHasher::HashSequentialString(const schar* chars,
}
+IteratingStringHasher::IteratingStringHasher(int len, uint32_t seed)
+ : StringHasher(len, seed) {}
+
+
uint32_t IteratingStringHasher::Hash(String* string, uint32_t seed) {
IteratingStringHasher hasher(string->length(), seed);
// Nothing to do.
@@ -6758,6 +7138,51 @@ bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
}
+bool AccessorInfo::HasExpectedReceiverType() {
+ return expected_receiver_type()->IsFunctionTemplateInfo();
+}
+
+
+Object* AccessorPair::get(AccessorComponent component) {
+ return component == ACCESSOR_GETTER ? getter() : setter();
+}
+
+
+void AccessorPair::set(AccessorComponent component, Object* value) {
+ if (component == ACCESSOR_GETTER) {
+ set_getter(value);
+ } else {
+ set_setter(value);
+ }
+}
+
+
+void AccessorPair::SetComponents(Object* getter, Object* setter) {
+ if (!getter->IsNull()) set_getter(getter);
+ if (!setter->IsNull()) set_setter(setter);
+}
+
+
+bool AccessorPair::Equals(AccessorPair* pair) {
+ return (this == pair) || pair->Equals(getter(), setter());
+}
+
+
+bool AccessorPair::Equals(Object* getter_value, Object* setter_value) {
+ return (getter() == getter_value) && (setter() == setter_value);
+}
+
+
+bool AccessorPair::ContainsAccessor() {
+ return IsJSAccessor(getter()) || IsJSAccessor(setter());
+}
+
+
+bool AccessorPair::IsJSAccessor(Object* obj) {
+ return obj->IsSpecFunction() || obj->IsUndefined();
+}
+
+
template<typename Derived, typename Shape, typename Key>
void Dictionary<Derived, Shape, Key>::SetEntry(int entry,
Handle<Object> key,
@@ -6931,6 +7356,11 @@ Handle<ObjectHashTable> ObjectHashTable::Shrink(
}
+Object* OrderedHashMap::ValueAt(int entry) {
+ return get(EntryToIndex(entry) + kValueOffset);
+}
+
+
template <int entrysize>
bool WeakHashTableShape<entrysize>::IsMatch(Handle<Object> key, Object* other) {
if (other->IsWeakCell()) other = WeakCell::cast(other)->value();
@@ -6965,6 +7395,30 @@ Handle<Object> WeakHashTableShape<entrysize>::AsHandle(Isolate* isolate,
}
+bool ScopeInfo::IsAsmModule() { return AsmModuleField::decode(Flags()); }
+
+
+bool ScopeInfo::IsAsmFunction() { return AsmFunctionField::decode(Flags()); }
+
+
+bool ScopeInfo::HasSimpleParameters() {
+ return HasSimpleParametersField::decode(Flags());
+}
+
+
+#define SCOPE_INFO_FIELD_ACCESSORS(name) \
+ void ScopeInfo::Set##name(int value) { set(k##name, Smi::FromInt(value)); } \
+ int ScopeInfo::name() { \
+ if (length() > 0) { \
+ return Smi::cast(get(k##name))->value(); \
+ } else { \
+ return 0; \
+ } \
+ }
+FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(SCOPE_INFO_FIELD_ACCESSORS)
+#undef SCOPE_INFO_FIELD_ACCESSORS
+
+
void Map::ClearCodeCache(Heap* heap) {
// No write barrier is needed since empty_fixed_array is not in new space.
// Please note this function is used during marking:
@@ -6975,12 +7429,13 @@ void Map::ClearCodeCache(Heap* heap) {
}
-int Map::SlackForArraySize(bool is_prototype_map, int old_size,
- int size_limit) {
+int Map::SlackForArraySize(int old_size, int size_limit) {
const int max_slack = size_limit - old_size;
CHECK_LE(0, max_slack);
- if (old_size < 4) return Min(max_slack, 1);
- if (is_prototype_map) return Min(max_slack, 4);
+ if (old_size < 4) {
+ DCHECK_LE(1, max_slack);
+ return 1;
+ }
return Min(max_slack, old_size / 4);
}
@@ -7002,7 +7457,7 @@ bool JSArray::SetLengthWouldNormalize(Heap* heap, uint32_t new_length) {
bool JSArray::AllowsSetLength() {
bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray();
- DCHECK(result == !HasExternalArrayElements());
+ DCHECK(result == !HasFixedTypedArrayElements());
return result;
}
@@ -7134,11 +7589,24 @@ Relocatable::~Relocatable() {
}
+// static
int JSObject::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
return map->instance_size();
}
+// static
+int FixedArray::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
+ return SizeFor(reinterpret_cast<FixedArray*>(object)->synchronized_length());
+}
+
+
+// static
+int StructBodyDescriptor::SizeOf(Map* map, HeapObject* object) {
+ return map->instance_size();
+}
+
+
void Foreign::ForeignIterateBody(ObjectVisitor* v) {
v->VisitExternalReference(
reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
@@ -7271,6 +7739,12 @@ Object* JSMapIterator::CurrentValue() {
}
+String::SubStringRange::SubStringRange(String* string, int first, int length)
+ : string_(string),
+ first_(first),
+ length_(length == -1 ? string->length() : length) {}
+
+
class String::SubStringRange::iterator final {
public:
typedef std::forward_iterator_tag iterator_category;
@@ -7338,10 +7812,24 @@ String::SubStringRange::iterator String::SubStringRange::end() {
#undef WRITE_INT_FIELD
#undef READ_INTPTR_FIELD
#undef WRITE_INTPTR_FIELD
+#undef READ_UINT8_FIELD
+#undef WRITE_UINT8_FIELD
+#undef READ_INT8_FIELD
+#undef WRITE_INT8_FIELD
+#undef READ_UINT16_FIELD
+#undef WRITE_UINT16_FIELD
+#undef READ_INT16_FIELD
+#undef WRITE_INT16_FIELD
#undef READ_UINT32_FIELD
#undef WRITE_UINT32_FIELD
-#undef READ_SHORT_FIELD
-#undef WRITE_SHORT_FIELD
+#undef READ_INT32_FIELD
+#undef WRITE_INT32_FIELD
+#undef READ_FLOAT_FIELD
+#undef WRITE_FLOAT_FIELD
+#undef READ_UINT64_FIELD
+#undef WRITE_UINT64_FIELD
+#undef READ_INT64_FIELD
+#undef WRITE_INT64_FIELD
#undef READ_BYTE_FIELD
#undef WRITE_BYTE_FIELD
#undef NOBARRIER_READ_BYTE_FIELD
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index b514a9443f..799561eb8b 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -6,9 +6,9 @@
#include "src/disasm.h"
#include "src/disassembler.h"
-#include "src/heap/objects-visiting.h"
-#include "src/jsregexp.h"
+#include "src/interpreter/bytecodes.h"
#include "src/ostreams.h"
+#include "src/regexp/jsregexp.h"
namespace v8 {
namespace internal {
@@ -60,8 +60,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
HeapNumber::cast(this)->HeapNumberPrint(os);
os << ">";
break;
- case FLOAT32X4_TYPE:
- Float32x4::cast(this)->Float32x4Print(os);
+ case SIMD128_VALUE_TYPE:
+ Simd128Value::cast(this)->Simd128ValuePrint(os);
break;
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(os);
@@ -72,18 +72,13 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayPrint(os);
break;
+ case BYTECODE_ARRAY_TYPE:
+ BytecodeArray::cast(this)->BytecodeArrayPrint(os);
+ break;
case FREE_SPACE_TYPE:
FreeSpace::cast(this)->FreeSpacePrint(os);
break;
-#define PRINT_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ARRAY_TYPE: \
- External##Type##Array::cast(this)->External##Type##ArrayPrint(os); \
- break;
-
- TYPED_ARRAYS(PRINT_EXTERNAL_ARRAY)
-#undef PRINT_EXTERNAL_ARRAY
-
#define PRINT_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
case Fixed##Type##Array::kInstanceType: \
Fixed##Type##Array::cast(this)->FixedTypedArrayPrint(os); \
@@ -196,24 +191,68 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
}
-void ByteArray::ByteArrayPrint(std::ostream& os) { // NOLINT
- os << "byte array, data starts at " << GetDataStartAddress();
+void Simd128Value::Simd128ValuePrint(std::ostream& os) { // NOLINT
+#define PRINT_SIMD128_VALUE(TYPE, Type, type, lane_count, lane_type) \
+ if (Is##Type()) return Type::cast(this)->Type##Print(os);
+ SIMD128_TYPES(PRINT_SIMD128_VALUE)
+#undef PRINT_SIMD128_VALUE
+ UNREACHABLE();
}
-void FreeSpace::FreeSpacePrint(std::ostream& os) { // NOLINT
- os << "free space, size " << Size();
+void Float32x4::Float32x4Print(std::ostream& os) { // NOLINT
+ char arr[100];
+ Vector<char> buffer(arr, arraysize(arr));
+ os << std::string(DoubleToCString(get_lane(0), buffer)) << ", "
+ << std::string(DoubleToCString(get_lane(1), buffer)) << ", "
+ << std::string(DoubleToCString(get_lane(2), buffer)) << ", "
+ << std::string(DoubleToCString(get_lane(3), buffer));
}
-#define EXTERNAL_ARRAY_PRINTER(Type, type, TYPE, ctype, size) \
- void External##Type##Array::External##Type##ArrayPrint(std::ostream& os) { \
- os << "external " #type " array"; \
+#define SIMD128_INT_PRINT_FUNCTION(type, lane_count) \
+ void type::type##Print(std::ostream& os) { \
+ char arr[100]; \
+ Vector<char> buffer(arr, arraysize(arr)); \
+ os << std::string(IntToCString(get_lane(0), buffer)); \
+ for (int i = 1; i < lane_count; i++) { \
+ os << ", " << std::string(IntToCString(get_lane(i), buffer)); \
+ } \
}
+SIMD128_INT_PRINT_FUNCTION(Int32x4, 4)
+SIMD128_INT_PRINT_FUNCTION(Int16x8, 8)
+SIMD128_INT_PRINT_FUNCTION(Int8x16, 16)
+#undef SIMD128_INT_PRINT_FUNCTION
+
+
+#define SIMD128_BOOL_PRINT_FUNCTION(type, lane_count) \
+ void type::type##Print(std::ostream& os) { \
+ char arr[100]; \
+ Vector<char> buffer(arr, arraysize(arr)); \
+ os << std::string(get_lane(0) ? "true" : "false"); \
+ for (int i = 1; i < lane_count; i++) { \
+ os << ", " << std::string(get_lane(i) ? "true" : "false"); \
+ } \
+ }
+SIMD128_BOOL_PRINT_FUNCTION(Bool32x4, 4)
+SIMD128_BOOL_PRINT_FUNCTION(Bool16x8, 8)
+SIMD128_BOOL_PRINT_FUNCTION(Bool8x16, 16)
+#undef SIMD128_BOOL_PRINT_FUNCTION
-TYPED_ARRAYS(EXTERNAL_ARRAY_PRINTER)
-#undef EXTERNAL_ARRAY_PRINTER
+void ByteArray::ByteArrayPrint(std::ostream& os) { // NOLINT
+ os << "byte array, data starts at " << GetDataStartAddress();
+}
+
+
+void BytecodeArray::BytecodeArrayPrint(std::ostream& os) { // NOLINT
+ Disassemble(os);
+}
+
+
+void FreeSpace::FreeSpacePrint(std::ostream& os) { // NOLINT
+ os << "free space, size " << Size();
+}
template <class Traits>
@@ -312,19 +351,6 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
break; \
}
- PRINT_ELEMENTS(EXTERNAL_UINT8_CLAMPED_ELEMENTS, ExternalUint8ClampedArray)
- PRINT_ELEMENTS(EXTERNAL_INT8_ELEMENTS, ExternalInt8Array)
- PRINT_ELEMENTS(EXTERNAL_UINT8_ELEMENTS,
- ExternalUint8Array)
- PRINT_ELEMENTS(EXTERNAL_INT16_ELEMENTS, ExternalInt16Array)
- PRINT_ELEMENTS(EXTERNAL_UINT16_ELEMENTS,
- ExternalUint16Array)
- PRINT_ELEMENTS(EXTERNAL_INT32_ELEMENTS, ExternalInt32Array)
- PRINT_ELEMENTS(EXTERNAL_UINT32_ELEMENTS,
- ExternalUint32Array)
- PRINT_ELEMENTS(EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array)
- PRINT_ELEMENTS(EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array)
-
PRINT_ELEMENTS(UINT8_ELEMENTS, FixedUint8Array)
PRINT_ELEMENTS(UINT8_CLAMPED_ELEMENTS, FixedUint8ClampedArray)
PRINT_ELEMENTS(INT8_ELEMENTS, FixedInt8Array)
@@ -411,10 +437,10 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Map");
os << " - type: " << TypeToString(instance_type()) << "\n";
os << " - instance size: " << instance_size() << "\n";
- os << " - inobject properties: " << inobject_properties() << "\n";
- os << " - elements kind: " << ElementsKindToString(elements_kind());
- os << "\n - pre-allocated property fields: "
- << pre_allocated_property_fields() << "\n";
+ if (IsJSObjectMap()) {
+ os << " - inobject properties: " << GetInObjectProperties() << "\n";
+ }
+ os << " - elements kind: " << ElementsKindToString(elements_kind()) << "\n";
os << " - unused property fields: " << unused_property_fields() << "\n";
if (is_deprecated()) os << " - deprecated_map\n";
if (is_stable()) os << " - stable_map\n";
@@ -505,6 +531,61 @@ void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
}
+void TypeFeedbackVector::Print() {
+ OFStream os(stdout);
+ TypeFeedbackVectorPrint(os);
+ os << std::flush;
+}
+
+
+void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "TypeFeedbackVector");
+ os << " - length: " << length();
+ if (length() == 0) {
+ os << " (empty)\n";
+ return;
+ }
+
+ os << "\n - ics with type info: " << ic_with_type_info_count();
+ os << "\n - generic ics: " << ic_generic_count();
+
+ if (Slots() > 0) {
+ for (int i = 0; i < Slots(); i++) {
+ FeedbackVectorSlot slot(i);
+ os << "\n Slot " << i << " [" << GetIndex(slot)
+ << "]: " << Brief(Get(slot));
+ }
+ }
+
+ if (ICSlots() > 0) {
+ DCHECK(elements_per_ic_slot() == 2);
+
+ for (int i = 0; i < ICSlots(); i++) {
+ FeedbackVectorICSlot slot(i);
+ Code::Kind kind = GetKind(slot);
+ os << "\n ICSlot " << i;
+ if (kind == Code::LOAD_IC) {
+ LoadICNexus nexus(this, slot);
+ os << " LOAD_IC " << Code::ICState2String(nexus.StateFromFeedback());
+ } else if (kind == Code::KEYED_LOAD_IC) {
+ KeyedLoadICNexus nexus(this, slot);
+ os << " KEYED_LOAD_IC "
+ << Code::ICState2String(nexus.StateFromFeedback());
+ } else {
+ DCHECK(kind == Code::CALL_IC);
+ CallICNexus nexus(this, slot);
+ os << " CALL_IC " << Code::ICState2String(nexus.StateFromFeedback());
+ }
+
+ os << "\n [" << GetIndex(slot) << "]: " << Brief(Get(slot));
+ os << "\n [" << (GetIndex(slot) + 1)
+ << "]: " << Brief(get(GetIndex(slot) + 1));
+ }
+ }
+ os << "\n";
+}
+
+
void JSValue::JSValuePrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "ValueObject");
value()->Print(os);
@@ -748,10 +829,8 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
String* source = String::cast(Script::cast(script())->source());
int start = start_position();
int length = end_position() - start;
- SmartArrayPointer<char> source_string =
- source->ToCString(DISALLOW_NULLS,
- FAST_STRING_TRAVERSAL,
- start, length, NULL);
+ base::SmartArrayPointer<char> source_string = source->ToCString(
+ DISALLOW_NULLS, FAST_STRING_TRAVERSAL, start, length, NULL);
os << source_string.get();
}
// Script files are often large, hard to read.
@@ -765,7 +844,10 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
os << "\n - length = " << length();
os << "\n - optimized_code_map = " << Brief(optimized_code_map());
os << "\n - feedback_vector = ";
- feedback_vector()->FixedArrayPrint(os);
+ feedback_vector()->TypeFeedbackVectorPrint(os);
+ if (HasBytecodeArray()) {
+ os << "\n - bytecode_array = " << bytecode_array();
+ }
os << "\n";
}
@@ -993,7 +1075,6 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "DebugInfo");
os << "\n - shared: " << Brief(shared());
- os << "\n - original_code: " << Brief(original_code());
os << "\n - code: " << Brief(code());
os << "\n - break_points: ";
break_points()->Print(os);
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 5c863855f8..65d5d5f528 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -20,17 +20,16 @@
#include "src/compiler.h"
#include "src/cpu-profiler.h"
#include "src/date.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
#include "src/execution.h"
#include "src/field-index-inl.h"
#include "src/field-index.h"
-#include "src/full-codegen.h"
-#include "src/heap/mark-compact.h"
-#include "src/heap/objects-visiting-inl.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/hydrogen.h"
#include "src/ic/ic.h"
+#include "src/interpreter/bytecodes.h"
#include "src/log.h"
#include "src/lookup.h"
#include "src/macro-assembler.h"
@@ -73,16 +72,17 @@ MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
Handle<Context> native_context) {
if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
Handle<JSFunction> constructor;
- if (object->IsNumber()) {
+ if (object->IsSmi()) {
constructor = handle(native_context->number_function(), isolate);
- } else if (object->IsBoolean()) {
- constructor = handle(native_context->boolean_function(), isolate);
- } else if (object->IsString()) {
- constructor = handle(native_context->string_function(), isolate);
- } else if (object->IsSymbol()) {
- constructor = handle(native_context->symbol_function(), isolate);
} else {
- return MaybeHandle<JSReceiver>();
+ int constructor_function_index =
+ Handle<HeapObject>::cast(object)->map()->GetConstructorFunctionIndex();
+ if (constructor_function_index == Map::kNoConstructorFunctionIndex) {
+ return MaybeHandle<JSReceiver>();
+ }
+ constructor = handle(
+ JSFunction::cast(native_context->get(constructor_function_index)),
+ isolate);
}
Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
Handle<JSValue>::cast(result)->set_value(*object);
@@ -97,11 +97,29 @@ bool Object::BooleanValue() {
if (IsUndetectableObject()) return false; // Undetectable object is false.
if (IsString()) return String::cast(this)->length() != 0;
if (IsHeapNumber()) return HeapNumber::cast(this)->HeapNumberBooleanValue();
- if (IsFloat32x4()) return true; // Simd value types always evaluate to true.
+ if (IsSimd128Value()) return true; // Simd value types evaluate to true.
return true;
}
+bool Object::StrictEquals(Object* that) {
+ if (this->IsNumber()) {
+ if (!that->IsNumber()) return false;
+ double const x = this->Number();
+ double const y = that->Number();
+ // Must check explicitly for NaN:s on Windows, but -0 works fine.
+ return x == y && !std::isnan(x) && !std::isnan(y);
+ } else if (this->IsString()) {
+ if (!that->IsString()) return false;
+ return String::cast(this)->Equals(String::cast(that));
+ } else if (this->IsSimd128Value()) {
+ if (!that->IsSimd128Value()) return false;
+ return Simd128Value::cast(this)->Equals(Simd128Value::cast(that));
+ }
+ return this == that;
+}
+
+
bool Object::IsCallable() const {
const Object* fun = this;
while (fun->IsJSFunctionProxy()) {
@@ -465,7 +483,8 @@ MaybeHandle<Object> Object::SetPropertyWithDefinedSetter(
}
-static bool FindAllCanReadHolder(LookupIterator* it) {
+// static
+bool JSObject::AllCanRead(LookupIterator* it) {
// Skip current iteration, it's in state ACCESS_CHECK or INTERCEPTOR, both of
// which have already been checked.
DCHECK(it->state() == LookupIterator::ACCESS_CHECK ||
@@ -487,7 +506,7 @@ static bool FindAllCanReadHolder(LookupIterator* it) {
MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
LookupIterator* it) {
Handle<JSObject> checked = it->GetHolder<JSObject>();
- while (FindAllCanReadHolder(it)) {
+ while (AllCanRead(it)) {
if (it->state() == LookupIterator::ACCESSOR) {
return GetPropertyWithAccessor(it, SLOPPY);
}
@@ -507,7 +526,7 @@ MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
LookupIterator* it) {
Handle<JSObject> checked = it->GetHolder<JSObject>();
- while (FindAllCanReadHolder(it)) {
+ while (AllCanRead(it)) {
if (it->state() == LookupIterator::ACCESSOR) {
return Just(it->property_details().attributes());
}
@@ -523,7 +542,8 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
}
-static bool FindAllCanWriteHolder(LookupIterator* it) {
+// static
+bool JSObject::AllCanWrite(LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
if (it->state() == LookupIterator::ACCESSOR) {
Handle<Object> accessors = it->GetAccessors();
@@ -539,7 +559,7 @@ static bool FindAllCanWriteHolder(LookupIterator* it) {
MaybeHandle<Object> JSObject::SetPropertyWithFailedAccessCheck(
LookupIterator* it, Handle<Object> value) {
Handle<JSObject> checked = it->GetHolder<JSObject>();
- if (FindAllCanWriteHolder(it)) {
+ if (AllCanWrite(it)) {
// The supplied language-mode is ignored by SetPropertyWithAccessor.
return SetPropertyWithAccessor(it, value, SLOPPY);
}
@@ -596,33 +616,36 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
}
+bool Object::HasInPrototypeChain(Isolate* isolate, Object* target) {
+ PrototypeIterator iter(isolate, this, PrototypeIterator::START_AT_RECEIVER);
+ while (true) {
+ iter.AdvanceIgnoringProxies();
+ if (iter.IsAtEnd()) return false;
+ if (iter.IsAtEnd(target)) return true;
+ }
+}
+
+
Map* Object::GetRootMap(Isolate* isolate) {
DisallowHeapAllocation no_alloc;
if (IsSmi()) {
- Context* context = isolate->context()->native_context();
- return context->number_function()->initial_map();
+ Context* native_context = isolate->context()->native_context();
+ return native_context->number_function()->initial_map();
}
- HeapObject* heap_object = HeapObject::cast(this);
-
- // The object is either a number, a string, a boolean,
+ // The object is either a number, a string, a symbol, a boolean, a SIMD value,
// a real JS object, or a Harmony proxy.
+ HeapObject* heap_object = HeapObject::cast(this);
if (heap_object->IsJSReceiver()) {
return heap_object->map();
}
- Context* context = isolate->context()->native_context();
-
- if (heap_object->IsHeapNumber()) {
- return context->number_function()->initial_map();
- }
- if (heap_object->IsString()) {
- return context->string_function()->initial_map();
- }
- if (heap_object->IsSymbol()) {
- return context->symbol_function()->initial_map();
- }
- if (heap_object->IsBoolean()) {
- return context->boolean_function()->initial_map();
+ int constructor_function_index =
+ heap_object->map()->GetConstructorFunctionIndex();
+ if (constructor_function_index != Map::kNoConstructorFunctionIndex) {
+ Context* native_context = isolate->context()->native_context();
+ JSFunction* constructor_function =
+ JSFunction::cast(native_context->get(constructor_function_index));
+ return constructor_function->initial_map();
}
return isolate->heap()->null_value()->map();
}
@@ -639,7 +662,7 @@ Object* Object::GetHash() {
Object* Object::GetSimpleHash() {
// The object is either a Smi, a HeapNumber, a name, an odd-ball,
- // a real JS object, or a Harmony proxy.
+ // a SIMD value type, a real JS object, or a Harmony proxy.
if (IsSmi()) {
uint32_t hash = ComputeIntegerHash(Smi::cast(this)->value(), kZeroHashSeed);
return Smi::FromInt(hash & Smi::kMaxValue);
@@ -662,6 +685,10 @@ Object* Object::GetSimpleHash() {
uint32_t hash = Oddball::cast(this)->to_string()->Hash();
return Smi::FromInt(hash);
}
+ if (IsSimd128Value()) {
+ uint32_t hash = Simd128Value::cast(this)->Hash();
+ return Smi::FromInt(hash & Smi::kMaxValue);
+ }
DCHECK(IsJSReceiver());
JSReceiver* receiver = JSReceiver::cast(this);
return receiver->GetHeap()->undefined_value();
@@ -685,15 +712,36 @@ bool Object::SameValue(Object* other) {
if (IsNumber() && other->IsNumber()) {
double this_value = Number();
double other_value = other->Number();
- bool equal = this_value == other_value;
// SameValue(NaN, NaN) is true.
- if (!equal) return std::isnan(this_value) && std::isnan(other_value);
+ if (this_value != other_value) {
+ return std::isnan(this_value) && std::isnan(other_value);
+ }
// SameValue(0.0, -0.0) is false.
- return (this_value != 0) || ((1 / this_value) == (1 / other_value));
+ return (std::signbit(this_value) == std::signbit(other_value));
}
if (IsString() && other->IsString()) {
return String::cast(this)->Equals(String::cast(other));
}
+ if (IsSimd128Value() && other->IsSimd128Value()) {
+ if (IsFloat32x4() && other->IsFloat32x4()) {
+ Float32x4* a = Float32x4::cast(this);
+ Float32x4* b = Float32x4::cast(other);
+ for (int i = 0; i < 4; i++) {
+ float x = a->get_lane(i);
+ float y = b->get_lane(i);
+ // Implements the ES5 SameValue operation for floating point types.
+ // http://www.ecma-international.org/ecma-262/6.0/#sec-samevalue
+ if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
+ if (std::signbit(x) != std::signbit(y)) return false;
+ }
+ return true;
+ } else {
+ Simd128Value* a = Simd128Value::cast(this);
+ Simd128Value* b = Simd128Value::cast(other);
+ return a->map()->instance_type() == b->map()->instance_type() &&
+ a->BitwiseEquals(b);
+ }
+ }
return false;
}
@@ -707,12 +755,32 @@ bool Object::SameValueZero(Object* other) {
double this_value = Number();
double other_value = other->Number();
// +0 == -0 is true
- return this_value == other_value
- || (std::isnan(this_value) && std::isnan(other_value));
+ return this_value == other_value ||
+ (std::isnan(this_value) && std::isnan(other_value));
}
if (IsString() && other->IsString()) {
return String::cast(this)->Equals(String::cast(other));
}
+ if (IsSimd128Value() && other->IsSimd128Value()) {
+ if (IsFloat32x4() && other->IsFloat32x4()) {
+ Float32x4* a = Float32x4::cast(this);
+ Float32x4* b = Float32x4::cast(other);
+ for (int i = 0; i < 4; i++) {
+ float x = a->get_lane(i);
+ float y = b->get_lane(i);
+ // Implements the ES6 SameValueZero operation for floating point types.
+ // http://www.ecma-international.org/ecma-262/6.0/#sec-samevaluezero
+ if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
+ // SameValueZero doesn't distinguish between 0 and -0.
+ }
+ return true;
+ } else {
+ Simd128Value* a = Simd128Value::cast(this);
+ Simd128Value* b = Simd128Value::cast(other);
+ return a->map()->instance_type() == b->map()->instance_type() &&
+ a->BitwiseEquals(b);
+ }
+ }
return false;
}
@@ -861,8 +929,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
self->set_resource(resource);
if (is_internalized) self->Hash(); // Force regeneration of the hash value.
- heap->AdjustLiveBytes(this->address(), new_size - size,
- Heap::CONCURRENT_TO_SWEEPER);
+ heap->AdjustLiveBytes(this, new_size - size, Heap::CONCURRENT_TO_SWEEPER);
return true;
}
@@ -922,8 +989,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
self->set_resource(resource);
if (is_internalized) self->Hash(); // Force regeneration of the hash value.
- heap->AdjustLiveBytes(this->address(), new_size - size,
- Heap::CONCURRENT_TO_SWEEPER);
+ heap->AdjustLiveBytes(this, new_size - size, Heap::CONCURRENT_TO_SWEEPER);
return true;
}
@@ -1250,14 +1316,13 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case BYTE_ARRAY_TYPE:
os << "<ByteArray[" << ByteArray::cast(this)->length() << "]>";
break;
+ case BYTECODE_ARRAY_TYPE:
+ os << "<BytecodeArray[" << BytecodeArray::cast(this)->length() << "]>";
+ break;
case FREE_SPACE_TYPE:
os << "<FreeSpace[" << FreeSpace::cast(this)->Size() << "]>";
break;
#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ARRAY_TYPE: \
- os << "<External" #Type "Array[" \
- << External##Type##Array::cast(this)->length() << "]>"; \
- break; \
case FIXED_##TYPE##_ARRAY_TYPE: \
os << "<Fixed" #Type "Array[" << Fixed##Type##Array::cast(this)->length() \
<< "]>"; \
@@ -1268,7 +1333,7 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case SHARED_FUNCTION_INFO_TYPE: {
SharedFunctionInfo* shared = SharedFunctionInfo::cast(this);
- SmartArrayPointer<char> debug_name =
+ base::SmartArrayPointer<char> debug_name =
shared->DebugName()->ToCString();
if (debug_name[0] != 0) {
os << "<SharedFunctionInfo " << debug_name.get() << ">";
@@ -1324,10 +1389,15 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << '>';
break;
}
- case FLOAT32X4_TYPE: {
- os << "<Float32x4: ";
- Float32x4::cast(this)->Float32x4Print(os);
- os << ">";
+ case SIMD128_VALUE_TYPE: {
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ if (Is##Type()) { \
+ os << "<" #Type ">"; \
+ break; \
+ }
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ UNREACHABLE();
break;
}
case JS_PROXY_TYPE:
@@ -1380,155 +1450,53 @@ void HeapObject::Iterate(ObjectVisitor* v) {
}
-void HeapObject::IterateBody(InstanceType type, int object_size,
- ObjectVisitor* v) {
- // Avoiding <Type>::cast(this) because it accesses the map pointer field.
- // During GC, the map pointer field is encoded.
- if (type < FIRST_NONSTRING_TYPE) {
- switch (type & kStringRepresentationMask) {
- case kSeqStringTag:
- break;
- case kConsStringTag:
- ConsString::BodyDescriptor::IterateBody(this, v);
- break;
- case kSlicedStringTag:
- SlicedString::BodyDescriptor::IterateBody(this, v);
- break;
- case kExternalStringTag:
- if ((type & kStringEncodingMask) == kOneByteStringTag) {
- reinterpret_cast<ExternalOneByteString*>(this)
- ->ExternalOneByteStringIterateBody(v);
- } else {
- reinterpret_cast<ExternalTwoByteString*>(this)->
- ExternalTwoByteStringIterateBody(v);
- }
- break;
- }
- return;
- }
+bool HeapNumber::HeapNumberBooleanValue() {
+ return DoubleToBoolean(value());
+}
- switch (type) {
- case FIXED_ARRAY_TYPE:
- FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
- break;
- case FIXED_DOUBLE_ARRAY_TYPE:
- break;
- case JS_OBJECT_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_GENERATOR_OBJECT_TYPE:
- case JS_MODULE_TYPE:
- case JS_VALUE_TYPE:
- case JS_DATE_TYPE:
- case JS_ARRAY_TYPE:
- case JS_TYPED_ARRAY_TYPE:
- case JS_DATA_VIEW_TYPE:
- case JS_SET_TYPE:
- case JS_MAP_TYPE:
- case JS_SET_ITERATOR_TYPE:
- case JS_MAP_ITERATOR_TYPE:
- case JS_WEAK_MAP_TYPE:
- case JS_WEAK_SET_TYPE:
- case JS_REGEXP_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- JSObject::BodyDescriptor::IterateBody(this, object_size, v);
- break;
- case JS_ARRAY_BUFFER_TYPE:
- JSArrayBuffer::JSArrayBufferIterateBody(this, v);
- break;
- case JS_FUNCTION_TYPE:
- reinterpret_cast<JSFunction*>(this)
- ->JSFunctionIterateBody(object_size, v);
- break;
- case ODDBALL_TYPE:
- Oddball::BodyDescriptor::IterateBody(this, v);
- break;
- case JS_PROXY_TYPE:
- JSProxy::BodyDescriptor::IterateBody(this, v);
- break;
- case JS_FUNCTION_PROXY_TYPE:
- JSFunctionProxy::BodyDescriptor::IterateBody(this, v);
- break;
- case FOREIGN_TYPE:
- reinterpret_cast<Foreign*>(this)->ForeignIterateBody(v);
- break;
- case MAP_TYPE:
- Map::BodyDescriptor::IterateBody(this, v);
- break;
- case CODE_TYPE:
- reinterpret_cast<Code*>(this)->CodeIterateBody(v);
- break;
- case CELL_TYPE:
- Cell::BodyDescriptor::IterateBody(this, v);
- break;
- case PROPERTY_CELL_TYPE:
- PropertyCell::BodyDescriptor::IterateBody(this, v);
- break;
- case WEAK_CELL_TYPE:
- WeakCell::BodyDescriptor::IterateBody(this, v);
- break;
- case SYMBOL_TYPE:
- Symbol::BodyDescriptor::IterateBody(this, v);
- break;
- case HEAP_NUMBER_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
- case FLOAT32X4_TYPE:
- case FILLER_TYPE:
- case BYTE_ARRAY_TYPE:
- case FREE_SPACE_TYPE:
- break;
+void HeapNumber::HeapNumberPrint(std::ostream& os) { // NOLINT
+ os << value();
+}
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ARRAY_TYPE: \
- break; \
- \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- reinterpret_cast<FixedTypedArrayBase*>(this) \
- ->FixedTypedArrayBaseIterateBody(v); \
- break;
+#define FIELD_ADDR_CONST(p, offset) \
+ (reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag)
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
+#define READ_INT32_FIELD(p, offset) \
+ (*reinterpret_cast<const int32_t*>(FIELD_ADDR_CONST(p, offset)))
- case SHARED_FUNCTION_INFO_TYPE: {
- SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
- break;
- }
+#define READ_INT64_FIELD(p, offset) \
+ (*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset)))
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE:
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- if (type == ALLOCATION_SITE_TYPE) {
- AllocationSite::BodyDescriptor::IterateBody(this, v);
- } else {
- StructBodyDescriptor::IterateBody(this, object_size, v);
- }
- break;
- default:
- PrintF("Unknown type: %d\n", type);
- UNREACHABLE();
- }
-}
+#define READ_BYTE_FIELD(p, offset) \
+ (*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))
-bool HeapNumber::HeapNumberBooleanValue() {
- return DoubleToBoolean(value());
+bool Simd128Value::BitwiseEquals(const Simd128Value* other) const {
+ return READ_INT64_FIELD(this, kValueOffset) ==
+ READ_INT64_FIELD(other, kValueOffset) &&
+ READ_INT64_FIELD(this, kValueOffset + kInt64Size) ==
+ READ_INT64_FIELD(other, kValueOffset + kInt64Size);
}
-void HeapNumber::HeapNumberPrint(std::ostream& os) { // NOLINT
- os << value();
+uint32_t Simd128Value::Hash() const {
+ uint32_t seed = v8::internal::kZeroHashSeed;
+ uint32_t hash;
+ hash = ComputeIntegerHash(READ_INT32_FIELD(this, kValueOffset), seed);
+ hash = ComputeIntegerHash(
+ READ_INT32_FIELD(this, kValueOffset + 1 * kInt32Size), hash * 31);
+ hash = ComputeIntegerHash(
+ READ_INT32_FIELD(this, kValueOffset + 2 * kInt32Size), hash * 31);
+ hash = ComputeIntegerHash(
+ READ_INT32_FIELD(this, kValueOffset + 3 * kInt32Size), hash * 31);
+ return hash;
}
-void Float32x4::Float32x4Print(std::ostream& os) { // NOLINT
- os << get_lane(0) << ", " << get_lane(1) << ", " << get_lane(2) << ", "
- << get_lane(3);
+void Simd128Value::CopyBits(void* destination) const {
+ memcpy(destination, &READ_BYTE_FIELD(this, kValueOffset), kSimd128Size);
}
@@ -1748,11 +1716,11 @@ bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields,
// If no fields were added, and no inobject properties were removed, setting
// the map is sufficient.
- if (target_inobject == inobject_properties()) return false;
+ if (target_inobject == GetInObjectProperties()) return false;
// In-object slack tracking may have reduced the object size of the new map.
// In that case, succeed if all existing fields were inobject, and they still
// fit within the new inobject size.
- DCHECK(target_inobject < inobject_properties());
+ DCHECK(target_inobject < GetInObjectProperties());
if (target_number_of_fields <= target_inobject) {
DCHECK(target_number_of_fields + target_unused == target_inobject);
return false;
@@ -1859,7 +1827,7 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
Handle<Map> old_map(object->map());
int old_number_of_fields;
int number_of_fields = new_map->NumberOfFields();
- int inobject = new_map->inobject_properties();
+ int inobject = new_map->GetInObjectProperties();
int unused = new_map->unused_property_fields();
// Nothing to do if no functions were converted to fields and no smis were
@@ -1895,9 +1863,10 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
DCHECK(number_of_fields == old_number_of_fields + 1);
// This migration is a transition from a map that has run out of property
// space. Therefore it could be done by extending the backing store.
+ int grow_by = external - object->properties()->length();
Handle<FixedArray> old_storage = handle(object->properties(), isolate);
Handle<FixedArray> new_storage =
- FixedArray::CopySize(old_storage, external);
+ isolate->factory()->CopyFixedArrayAndGrow(old_storage, grow_by);
// Properly initialize newly added property.
Handle<Object> value;
@@ -2027,7 +1996,7 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
Address address = object->address();
heap->CreateFillerObjectAt(
address + new_instance_size, instance_size_delta);
- heap->AdjustLiveBytes(address, -instance_size_delta,
+ heap->AdjustLiveBytes(*object, -instance_size_delta,
Heap::CONCURRENT_TO_SWEEPER);
}
@@ -2255,10 +2224,23 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
}
+bool FieldTypeIsCleared(Representation rep, Handle<HeapType> type) {
+ return type->Is(HeapType::None()) && rep.IsHeapObject();
+}
+
+
// static
-Handle<HeapType> Map::GeneralizeFieldType(Handle<HeapType> type1,
+Handle<HeapType> Map::GeneralizeFieldType(Representation rep1,
+ Handle<HeapType> type1,
+ Representation rep2,
Handle<HeapType> type2,
Isolate* isolate) {
+ // Cleared field types need special treatment. They represent lost knowledge,
+ // so we must be conservative, so their generalization with any other type
+ // is "Any".
+ if (FieldTypeIsCleared(rep1, type1) || FieldTypeIsCleared(rep2, type2)) {
+ return HeapType::Any(isolate);
+ }
if (type1->NowIs(type2)) return type2;
if (type2->NowIs(type1)) return type1;
return HeapType::Any(isolate);
@@ -2279,10 +2261,13 @@ void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
isolate);
if (old_representation.Equals(new_representation) &&
+ !FieldTypeIsCleared(new_representation, new_field_type) &&
+ // Checking old_field_type for being cleared is not necessary because
+ // the NowIs check below would fail anyway in that case.
new_field_type->NowIs(old_field_type)) {
- DCHECK(Map::GeneralizeFieldType(old_field_type,
- new_field_type,
- isolate)->NowIs(old_field_type));
+ DCHECK(Map::GeneralizeFieldType(old_representation, old_field_type,
+ new_representation, new_field_type, isolate)
+ ->NowIs(old_field_type));
return;
}
@@ -2291,17 +2276,10 @@ void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
Handle<DescriptorArray> descriptors(
field_owner->instance_descriptors(), isolate);
DCHECK_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
- bool old_field_type_was_cleared =
- old_field_type->Is(HeapType::None()) && old_representation.IsHeapObject();
- // Determine the generalized new field type. Conservatively assume type Any
- // for cleared field types because the cleared type could have been a
- // deprecated map and there still could be live instances with a non-
- // deprecated version of the map.
new_field_type =
- old_field_type_was_cleared
- ? HeapType::Any(isolate)
- : Map::GeneralizeFieldType(old_field_type, new_field_type, isolate);
+ Map::GeneralizeFieldType(old_representation, old_field_type,
+ new_representation, new_field_type, isolate);
PropertyDetails details = descriptors->GetDetails(modify_index);
Handle<Name> name(descriptors->GetKey(modify_index));
@@ -2525,8 +2503,10 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
Handle<HeapType> old_field_type =
GetFieldType(isolate, old_descriptors, i,
old_details.location(), tmp_representation);
- next_field_type =
- GeneralizeFieldType(next_field_type, old_field_type, isolate);
+ Representation old_representation = old_details.representation();
+ next_field_type = GeneralizeFieldType(
+ old_representation, old_field_type, new_representation,
+ next_field_type, isolate);
}
} else {
Handle<HeapType> old_field_type =
@@ -2690,21 +2670,24 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
Handle<HeapType> next_field_type;
if (modify_index == i) {
- next_field_type =
- GeneralizeFieldType(target_field_type, new_field_type, isolate);
+ next_field_type = GeneralizeFieldType(
+ target_details.representation(), target_field_type,
+ new_representation, new_field_type, isolate);
if (!property_kind_reconfiguration) {
Handle<HeapType> old_field_type =
GetFieldType(isolate, old_descriptors, i,
old_details.location(), next_representation);
- next_field_type =
- GeneralizeFieldType(next_field_type, old_field_type, isolate);
+ next_field_type = GeneralizeFieldType(
+ old_details.representation(), old_field_type,
+ next_representation, next_field_type, isolate);
}
} else {
Handle<HeapType> old_field_type =
GetFieldType(isolate, old_descriptors, i, old_details.location(),
next_representation);
- next_field_type =
- GeneralizeFieldType(target_field_type, old_field_type, isolate);
+ next_field_type = GeneralizeFieldType(
+ old_details.representation(), old_field_type, next_representation,
+ target_field_type, isolate);
}
Handle<Object> wrapped_type(WrapType(next_field_type));
DataDescriptor d(target_key, current_offset, wrapped_type,
@@ -2765,8 +2748,9 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
Handle<HeapType> old_field_type =
GetFieldType(isolate, old_descriptors, i,
old_details.location(), next_representation);
- next_field_type =
- GeneralizeFieldType(next_field_type, old_field_type, isolate);
+ next_field_type = GeneralizeFieldType(
+ old_details.representation(), old_field_type,
+ next_representation, next_field_type, isolate);
}
} else {
Handle<HeapType> old_field_type =
@@ -3293,8 +3277,7 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
Handle<Object> to_assign = value;
// Convert the incoming value to a number for storing into typed arrays.
- if (it->IsElement() && (receiver->HasExternalArrayElements() ||
- receiver->HasFixedTypedArrayElements())) {
+ if (it->IsElement() && receiver->HasFixedTypedArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
ASSIGN_RETURN_ON_EXCEPTION(it->isolate(), to_assign,
Execution::ToNumber(it->isolate(), value),
@@ -3329,6 +3312,11 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
Object);
}
+#if VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ receiver->JSObjectVerify();
+ }
+#endif
return value;
}
@@ -3412,13 +3400,11 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
}
if (FLAG_trace_external_array_abuse &&
- (array->HasExternalArrayElements() ||
- array->HasFixedTypedArrayElements())) {
+ array->HasFixedTypedArrayElements()) {
CheckArrayAbuse(array, "typed elements write", it->index(), true);
}
- if (FLAG_trace_js_array_abuse && !array->HasExternalArrayElements() &&
- !array->HasFixedTypedArrayElements()) {
+ if (FLAG_trace_js_array_abuse && !array->HasFixedTypedArrayElements()) {
CheckArrayAbuse(array, "elements write", it->index(), false);
}
}
@@ -3453,6 +3439,11 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
it->factory()->the_hole_value()),
Object);
}
+#if VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ receiver->JSObjectVerify();
+ }
+#endif
}
return value;
@@ -3635,19 +3626,6 @@ Handle<Map> Map::FindTransitionedMap(Handle<Map> map,
static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
Map* current_map = map;
- // Support for legacy API: SetIndexedPropertiesTo{External,Pixel}Data
- // allows to change elements from arbitrary kind to any ExternalArray
- // elements kind. Satisfy its requirements, checking whether we already
- // have the cached transition.
- if (IsExternalArrayElementsKind(to_kind) &&
- !IsFixedTypedArrayElementsKind(map->elements_kind())) {
- Map* next_map = map->ElementsTransitionMap();
- if (next_map != NULL && next_map->elements_kind() == to_kind) {
- return next_map;
- }
- return map;
- }
-
ElementsKind kind = map->elements_kind();
while (kind != to_kind) {
Map* next_map = current_map->ElementsTransitionMap();
@@ -4018,7 +3996,7 @@ Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler(
Handle<Object> error = isolate->factory()->NewTypeError(
MessageTemplate::kProxyPropNotConfigurable, handler, name, trap);
isolate->Throw(*error);
- return Just(NONE);
+ return Nothing<PropertyAttributes>();
}
int attributes = NONE;
@@ -4081,7 +4059,8 @@ MaybeHandle<Object> JSProxy::CallTrap(Handle<JSProxy> proxy,
void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
- DCHECK(object->map()->inobject_properties() == map->inobject_properties());
+ DCHECK(object->map()->GetInObjectProperties() ==
+ map->GetInObjectProperties());
ElementsKind obj_kind = object->map()->elements_kind();
ElementsKind map_kind = map->elements_kind();
if (map_kind != obj_kind) {
@@ -4109,6 +4088,11 @@ void JSObject::MigrateInstance(Handle<JSObject> object) {
if (FLAG_trace_migration) {
object->PrintInstanceMigration(stdout, *original_map, *map);
}
+#if VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ object->JSObjectVerify();
+ }
+#endif
}
@@ -4125,6 +4109,11 @@ bool JSObject::TryMigrateInstance(Handle<JSObject> object) {
if (FLAG_trace_migration) {
object->PrintInstanceMigration(stdout, *original_map, object->map());
}
+#if VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ object->JSObjectVerify();
+ }
+#endif
return true;
}
@@ -4177,7 +4166,9 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
case LookupIterator::ACCESS_CHECK:
if (!it->HasAccess()) {
- return SetPropertyWithFailedAccessCheck(it, value);
+ it->isolate()->ReportFailedAccessCheck(it->GetHolder<JSObject>());
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
+ return value;
}
break;
@@ -4228,16 +4219,9 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
ExecutableAccessorInfo::ClearSetter(new_data);
}
- if (it->IsElement()) {
- SetElementCallback(it->GetHolder<JSObject>(), it->index(), new_data,
- attributes);
- } else {
- SetPropertyCallback(it->GetHolder<JSObject>(), it->name(), new_data,
- attributes);
- }
+ it->TransitionToAccessorPair(new_data, attributes);
} else {
it->ReconfigureDataProperty(value, attributes);
- it->WriteDataValue(value);
}
if (is_observed) {
@@ -4264,8 +4248,7 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
// Special case: properties of typed arrays cannot be reconfigured to
// non-writable nor to non-enumerable.
- if (it->IsElement() && (object->HasExternalArrayElements() ||
- object->HasFixedTypedArrayElements())) {
+ if (it->IsElement() && object->HasFixedTypedArrayElements()) {
return RedefineNonconfigurableProperty(it->isolate(), it->GetName(),
value, STRICT);
}
@@ -4274,7 +4257,6 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
if (is_observed) old_value = it->GetDataValue();
it->ReconfigureDataProperty(value, attributes);
- it->WriteDataValue(value);
if (is_observed) {
if (old_value->SameValue(*value)) {
@@ -4586,7 +4568,7 @@ void JSObject::MigrateFastToSlow(Handle<JSObject> object,
Heap* heap = isolate->heap();
heap->CreateFillerObjectAt(object->address() + new_instance_size,
instance_size_delta);
- heap->AdjustLiveBytes(object->address(), -instance_size_delta,
+ heap->AdjustLiveBytes(*object, -instance_size_delta,
Heap::CONCURRENT_TO_SWEEPER);
}
@@ -4598,7 +4580,7 @@ void JSObject::MigrateFastToSlow(Handle<JSObject> object,
// Ensure that in-object space of slow-mode object does not contain random
// garbage.
- int inobject_properties = new_map->inobject_properties();
+ int inobject_properties = new_map->GetInObjectProperties();
for (int i = 0; i < inobject_properties; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
object->RawFastPropertyAtPut(index, Smi::FromInt(0));
@@ -4655,7 +4637,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Handle<Map> old_map(object->map(), isolate);
- int inobject_props = old_map->inobject_properties();
+ int inobject_props = old_map->GetInObjectProperties();
// Allocate new map.
Handle<Map> new_map = Map::CopyDropDescriptors(old_map);
@@ -4781,9 +4763,8 @@ void JSObject::ResetElements(Handle<JSObject> object) {
static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
- Handle<FixedArrayBase> array,
- int length,
- Handle<SeededNumberDictionary> dictionary) {
+ Handle<FixedArrayBase> array, int length,
+ Handle<SeededNumberDictionary> dictionary, bool used_as_prototype) {
Isolate* isolate = array->GetIsolate();
Factory* factory = isolate->factory();
bool has_double_elements = array->IsFixedDoubleArray();
@@ -4802,49 +4783,66 @@ static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
}
if (!value->IsTheHole()) {
PropertyDetails details = PropertyDetails::Empty();
- dictionary =
- SeededNumberDictionary::AddNumberEntry(dictionary, i, value, details);
+ dictionary = SeededNumberDictionary::AddNumberEntry(
+ dictionary, i, value, details, used_as_prototype);
}
}
return dictionary;
}
+void JSObject::RequireSlowElements(SeededNumberDictionary* dictionary) {
+ if (dictionary->requires_slow_elements()) return;
+ dictionary->set_requires_slow_elements();
+ // TODO(verwaest): Remove this hack.
+ if (map()->is_prototype_map()) {
+ GetHeap()->ClearAllICsByKind(Code::KEYED_STORE_IC);
+ }
+}
+
+
+Handle<SeededNumberDictionary> JSObject::GetNormalizedElementDictionary(
+ Handle<JSObject> object, Handle<FixedArrayBase> elements) {
+ DCHECK(!object->HasDictionaryElements());
+ DCHECK(!object->HasSlowArgumentsElements());
+ Isolate* isolate = object->GetIsolate();
+ // Ensure that notifications fire if the array or object prototypes are
+ // normalizing.
+ isolate->UpdateArrayProtectorOnNormalizeElements(object);
+ int length = object->IsJSArray()
+ ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
+ : elements->length();
+ int used = object->GetFastElementsUsage();
+ Handle<SeededNumberDictionary> dictionary =
+ SeededNumberDictionary::New(isolate, used);
+ return CopyFastElementsToDictionary(elements, length, dictionary,
+ object->map()->is_prototype_map());
+}
+
+
Handle<SeededNumberDictionary> JSObject::NormalizeElements(
Handle<JSObject> object) {
- DCHECK(!object->HasExternalArrayElements() &&
- !object->HasFixedTypedArrayElements());
+ DCHECK(!object->HasFixedTypedArrayElements());
Isolate* isolate = object->GetIsolate();
// Find the backing store.
- Handle<FixedArrayBase> array(FixedArrayBase::cast(object->elements()));
- bool is_arguments =
- (array->map() == isolate->heap()->sloppy_arguments_elements_map());
+ Handle<FixedArrayBase> elements(object->elements(), isolate);
+ bool is_arguments = object->HasSloppyArgumentsElements();
if (is_arguments) {
- array = handle(FixedArrayBase::cast(
- Handle<FixedArray>::cast(array)->get(1)));
+ FixedArray* parameter_map = FixedArray::cast(*elements);
+ elements = handle(FixedArrayBase::cast(parameter_map->get(1)), isolate);
+ }
+
+ if (elements->IsDictionary()) {
+ return Handle<SeededNumberDictionary>::cast(elements);
}
- if (array->IsDictionary()) return Handle<SeededNumberDictionary>::cast(array);
DCHECK(object->HasFastSmiOrObjectElements() ||
object->HasFastDoubleElements() ||
object->HasFastArgumentsElements());
- // Ensure that notifications fire if the array or object prototypes are
- // normalizing.
- isolate->UpdateArrayProtectorOnNormalizeElements(object);
-
- // Compute the effective length and allocate a new backing store.
- int length = object->IsJSArray()
- ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
- : array->length();
- int old_capacity = 0;
- int used_elements = 0;
- object->GetElementsCapacityAndUsage(&old_capacity, &used_elements);
Handle<SeededNumberDictionary> dictionary =
- SeededNumberDictionary::New(isolate, used_elements);
-
- dictionary = CopyFastElementsToDictionary(array, length, dictionary);
+ GetNormalizedElementDictionary(object, elements);
// Switch to using the dictionary as the backing storage for elements.
ElementsKind target_kind =
@@ -5236,14 +5234,6 @@ MaybeHandle<Object> JSReceiver::DeleteProperty(LookupIterator* it,
return it->factory()->false_value();
}
- Handle<JSObject> holder = it->GetHolder<JSObject>();
- // TODO(verwaest): Remove this temporary compatibility hack when blink
- // tests are updated.
- if (!holder.is_identical_to(receiver) &&
- !(receiver->IsJSGlobalProxy() && holder->IsJSGlobalObject())) {
- return it->factory()->true_value();
- }
-
it->Delete();
if (is_observed) {
@@ -5337,7 +5327,6 @@ bool JSObject::ReferencesObject(Object* obj) {
// Raw pixels and external arrays do not reference other
// objects.
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
case TYPE##_ELEMENTS: \
break;
@@ -5447,8 +5436,7 @@ MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
}
// It's not possible to seal objects with external array elements
- if (object->HasExternalArrayElements() ||
- object->HasFixedTypedArrayElements()) {
+ if (object->HasFixedTypedArrayElements()) {
THROW_NEW_ERROR(
isolate, NewTypeError(MessageTemplate::kCannotPreventExtExternalArray),
Object);
@@ -5459,7 +5447,7 @@ MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
DCHECK(object->HasDictionaryElements() || object->HasSlowArgumentsElements());
// Make sure that we never go back to fast case.
- dictionary->set_requires_slow_elements();
+ object->RequireSlowElements(*dictionary);
// Do a map transition, other objects with this map may still
// be extensible.
@@ -5492,30 +5480,6 @@ bool JSObject::IsExtensible() {
}
-Handle<SeededNumberDictionary> JSObject::GetNormalizedElementDictionary(
- Handle<JSObject> object) {
- DCHECK(!object->elements()->IsDictionary());
- Isolate* isolate = object->GetIsolate();
- int length = object->IsJSArray()
- ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
- : object->elements()->length();
- if (length > 0) {
- int capacity = 0;
- int used = 0;
- object->GetElementsCapacityAndUsage(&capacity, &used);
- Handle<SeededNumberDictionary> new_element_dictionary =
- SeededNumberDictionary::New(isolate, used);
-
- // Move elements to a dictionary; avoid calling NormalizeElements to avoid
- // unnecessary transitions.
- return CopyFastElementsToDictionary(handle(object->elements()), length,
- new_element_dictionary);
- }
- // No existing elements, use a pre-allocated empty backing store
- return isolate->factory()->empty_slow_element_dictionary();
-}
-
-
template <typename Dictionary>
static void ApplyAttributesToDictionary(Dictionary* dictionary,
const PropertyAttributes attributes) {
@@ -5565,17 +5529,22 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
}
// It's not possible to seal or freeze objects with external array elements
- if (object->HasExternalArrayElements() ||
- object->HasFixedTypedArrayElements()) {
+ if (object->HasFixedTypedArrayElements()) {
THROW_NEW_ERROR(
isolate, NewTypeError(MessageTemplate::kCannotPreventExtExternalArray),
Object);
}
Handle<SeededNumberDictionary> new_element_dictionary;
- if (!object->elements()->IsDictionary()) {
- new_element_dictionary = GetNormalizedElementDictionary(object);
- isolate->UpdateArrayProtectorOnNormalizeElements(object);
+ if (!object->HasDictionaryElements()) {
+ int length =
+ object->IsJSArray()
+ ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
+ : object->elements()->length();
+ new_element_dictionary =
+ length == 0 ? isolate->factory()->empty_slow_element_dictionary()
+ : GetNormalizedElementDictionary(
+ object, handle(object->elements()));
}
Handle<Symbol> transition_marker;
@@ -5632,7 +5601,7 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
if (object->elements() != isolate->heap()->empty_slow_element_dictionary()) {
SeededNumberDictionary* dictionary = object->element_dictionary();
// Make sure we never go back to the fast case
- dictionary->set_requires_slow_elements();
+ object->RequireSlowElements(dictionary);
if (attrs != NONE) {
ApplyAttributesToDictionary(dictionary, attrs);
}
@@ -5829,7 +5798,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
// Deep copy own elements.
// Pixel elements cannot be created using an object literal.
- DCHECK(!copy->HasExternalArrayElements());
+ DCHECK(!copy->HasFixedTypedArrayElements());
switch (kind) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
@@ -5891,7 +5860,6 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
case TYPE##_ELEMENTS: \
TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -6142,10 +6110,13 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
Handle<JSFunction> arguments_function(
JSFunction::cast(isolate->sloppy_arguments_map()->GetConstructor()));
+ PrototypeIterator::WhereToEnd end = type == OWN_ONLY
+ ? PrototypeIterator::END_AT_NON_HIDDEN
+ : PrototypeIterator::END_AT_NULL;
// Only collect keys if access is permitted.
for (PrototypeIterator iter(isolate, object,
PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(); iter.Advance()) {
+ !iter.IsAtEnd(end); iter.Advance()) {
if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
Handle<JSProxy> proxy(JSProxy::cast(*PrototypeIterator::GetCurrent(iter)),
isolate);
@@ -6172,8 +6143,10 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
// Check access rights if required.
if (current->IsAccessCheckNeeded() && !isolate->MayAccess(current)) {
- isolate->ReportFailedAccessCheck(current);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, FixedArray);
+ if (iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+ isolate->ReportFailedAccessCheck(current);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, FixedArray);
+ }
break;
}
@@ -6234,123 +6207,34 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
}
DCHECK(ContainsOnlyValidKeys(content));
}
-
- // If we only want own properties we bail out after the first
- // iteration.
- if (type == OWN_ONLY) break;
}
return content;
}
-// Try to update an accessor in an elements dictionary. Return true if the
-// update succeeded, and false otherwise.
-static bool UpdateGetterSetterInDictionary(
- SeededNumberDictionary* dictionary,
- uint32_t index,
- Object* getter,
- Object* setter,
- PropertyAttributes attributes) {
- int entry = dictionary->FindEntry(index);
- if (entry != SeededNumberDictionary::kNotFound) {
- Object* result = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == ACCESSOR_CONSTANT && result->IsAccessorPair()) {
- DCHECK(details.IsConfigurable());
- if (details.attributes() != attributes) {
- dictionary->DetailsAtPut(
- entry, PropertyDetails(attributes, ACCESSOR_CONSTANT, index,
- PropertyCellType::kNoCell));
- }
- AccessorPair::cast(result)->SetComponents(getter, setter);
- return true;
- }
- }
- return false;
-}
-
-
-void JSObject::DefineElementAccessor(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> getter,
- Handle<Object> setter,
- PropertyAttributes attributes) {
- switch (object->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- break;
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
- case TYPE##_ELEMENTS: \
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- // Ignore getters and setters on pixel and external array elements.
- return;
-
- case DICTIONARY_ELEMENTS:
- if (UpdateGetterSetterInDictionary(object->element_dictionary(),
- index,
- *getter,
- *setter,
- attributes)) {
- return;
- }
- break;
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
- // Ascertain whether we have read-only properties or an existing
- // getter/setter pair in an arguments elements dictionary backing
- // store.
- FixedArray* parameter_map = FixedArray::cast(object->elements());
- uint32_t length = parameter_map->length();
- Object* probe =
- index < (length - 2) ? parameter_map->get(index + 2) : NULL;
- if (probe == NULL || probe->IsTheHole()) {
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- if (arguments->IsDictionary()) {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(arguments);
- if (UpdateGetterSetterInDictionary(dictionary,
- index,
- *getter,
- *setter,
- attributes)) {
- return;
- }
- }
- }
- break;
- }
- }
-
- Isolate* isolate = object->GetIsolate();
- Handle<AccessorPair> accessors = isolate->factory()->NewAccessorPair();
- accessors->SetComponents(*getter, *setter);
-
- SetElementCallback(object, index, accessors, attributes);
-}
-
-
bool Map::DictionaryElementsInPrototypeChainOnly() {
if (IsDictionaryElementsKind(elements_kind())) {
return false;
}
for (PrototypeIterator iter(this); !iter.IsAtEnd(); iter.Advance()) {
- if (iter.GetCurrent()->IsJSProxy()) {
- // Be conservative, don't walk into proxies.
+ // Be conservative, don't walk into proxies.
+ if (iter.GetCurrent()->IsJSProxy()) return true;
+ // String wrappers have non-configurable, non-writable elements.
+ if (iter.GetCurrent()->IsStringWrapper()) return true;
+ JSObject* current = JSObject::cast(iter.GetCurrent());
+
+ if (current->HasDictionaryElements() &&
+ current->element_dictionary()->requires_slow_elements()) {
return true;
}
- if (IsDictionaryElementsKind(
- JSObject::cast(iter.GetCurrent())->map()->elements_kind())) {
- return true;
+ if (current->HasSlowArgumentsElements()) {
+ FixedArray* parameter_map = FixedArray::cast(current->elements());
+ Object* arguments = parameter_map->get(1);
+ if (SeededNumberDictionary::cast(arguments)->requires_slow_elements()) {
+ return true;
+ }
}
}
@@ -6358,66 +6242,6 @@ bool Map::DictionaryElementsInPrototypeChainOnly() {
}
-void JSObject::SetElementCallback(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> structure,
- PropertyAttributes attributes) {
- Heap* heap = object->GetHeap();
- PropertyDetails details = PropertyDetails(attributes, ACCESSOR_CONSTANT, 0,
- PropertyCellType::kNoCell);
-
- // Normalize elements to make this operation simple.
- bool had_dictionary_elements = object->HasDictionaryElements();
- Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
- DCHECK(object->HasDictionaryElements() || object->HasSlowArgumentsElements());
- // Update the dictionary with the new ACCESSOR_CONSTANT property.
- dictionary = SeededNumberDictionary::Set(dictionary, index, structure,
- details);
- dictionary->set_requires_slow_elements();
-
- // Update the dictionary backing store on the object.
- if (object->elements()->map() == heap->sloppy_arguments_elements_map()) {
- // Also delete any parameter alias.
- //
- // TODO(kmillikin): when deleting the last parameter alias we could
- // switch to a direct backing store without the parameter map. This
- // would allow GC of the context.
- FixedArray* parameter_map = FixedArray::cast(object->elements());
- if (index < static_cast<uint32_t>(parameter_map->length()) - 2) {
- parameter_map->set(index + 2, heap->the_hole_value());
- }
- parameter_map->set(1, *dictionary);
- } else {
- object->set_elements(*dictionary);
-
- if (!had_dictionary_elements) {
- // KeyedStoreICs (at least the non-generic ones) need a reset.
- heap->ClearAllICsByKind(Code::KEYED_STORE_IC);
- }
- }
-}
-
-
-void JSObject::SetPropertyCallback(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> structure,
- PropertyAttributes attributes) {
- PropertyNormalizationMode mode = object->map()->is_prototype_map()
- ? KEEP_INOBJECT_PROPERTIES
- : CLEAR_INOBJECT_PROPERTIES;
- // Normalize object to make this operation simple.
- NormalizeProperties(object, mode, 0, "SetPropertyCallback");
-
-
- // Update the dictionary with the new ACCESSOR_CONSTANT property.
- PropertyDetails details = PropertyDetails(attributes, ACCESSOR_CONSTANT, 0,
- PropertyCellType::kMutable);
- SetNormalizedProperty(object, name, structure, details);
-
- ReoptimizeIfPrototype(object);
-}
-
-
MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> getter,
@@ -6425,13 +6249,6 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
PropertyAttributes attributes) {
Isolate* isolate = object->GetIsolate();
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc(isolate);
-
- // Try to flatten before operating on the string.
- if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
-
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
@@ -6444,6 +6261,11 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
it.Next();
}
+ // Ignore accessors on typed arrays.
+ if (it.IsElement() && object->HasFixedTypedArrayElements()) {
+ return it.factory()->undefined_value();
+ }
+
Handle<Object> old_value = isolate->factory()->the_hole_value();
bool is_observed = object->map()->is_observed() &&
!isolate->IsInternallyUsedPropertyName(name);
@@ -6457,25 +6279,20 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
}
}
- if (it.IsElement()) {
- DefineElementAccessor(it.GetStoreTarget(), it.index(), getter, setter,
- attributes);
- } else {
- DCHECK(getter->IsSpecFunction() || getter->IsUndefined() ||
- getter->IsNull());
- DCHECK(setter->IsSpecFunction() || setter->IsUndefined() ||
- setter->IsNull());
- // At least one of the accessors needs to be a new value.
- DCHECK(!getter->IsNull() || !setter->IsNull());
- if (!getter->IsNull()) {
- it.TransitionToAccessorProperty(ACCESSOR_GETTER, getter, attributes);
- }
- if (!setter->IsNull()) {
- it.TransitionToAccessorProperty(ACCESSOR_SETTER, setter, attributes);
- }
+ DCHECK(getter->IsSpecFunction() || getter->IsUndefined() || getter->IsNull());
+ DCHECK(setter->IsSpecFunction() || setter->IsUndefined() || setter->IsNull());
+ // At least one of the accessors needs to be a new value.
+ DCHECK(!getter->IsNull() || !setter->IsNull());
+ if (!getter->IsNull()) {
+ it.TransitionToAccessorProperty(ACCESSOR_GETTER, getter, attributes);
+ }
+ if (!setter->IsNull()) {
+ it.TransitionToAccessorProperty(ACCESSOR_SETTER, setter, attributes);
}
if (is_observed) {
+ // Make sure the top context isn't changed.
+ AssertNoContextChange ncc(isolate);
const char* type = preexists ? "reconfigure" : "add";
RETURN_ON_EXCEPTION(
isolate, EnqueueChangeRecord(object, type, name, old_value), Object);
@@ -6488,14 +6305,7 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
Handle<AccessorInfo> info) {
Isolate* isolate = object->GetIsolate();
-
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc(isolate);
-
- // Try to flatten before operating on the string.
- Handle<Name> name(Name::cast(info->name()));
- if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
+ Handle<Name> name(Name::cast(info->name()), isolate);
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
@@ -6514,25 +6324,20 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
it.Next();
}
+ // Ignore accessors on typed arrays.
+ if (it.IsElement() && object->HasFixedTypedArrayElements()) {
+ return it.factory()->undefined_value();
+ }
+
CHECK(GetPropertyAttributes(&it).IsJust());
// ES5 forbids turning a property into an accessor if it's not
// configurable. See 8.6.1 (Table 5).
- if (it.IsFound() && (it.IsReadOnly() || !it.IsConfigurable())) {
- return it.factory()->undefined_value();
- }
-
- // Ignore accessors on typed arrays.
- if (it.IsElement() && (object->HasFixedTypedArrayElements() ||
- object->HasExternalArrayElements())) {
+ if (it.IsFound() && !it.IsConfigurable()) {
return it.factory()->undefined_value();
}
- if (it.IsElement()) {
- SetElementCallback(object, it.index(), info, info->property_attributes());
- } else {
- SetPropertyCallback(object, name, info, info->property_attributes());
- }
+ it.TransitionToAccessorPair(info, info->property_attributes());
return object;
}
@@ -6720,13 +6525,13 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
PropertyNormalizationMode mode) {
int new_instance_size = map->instance_size();
if (mode == CLEAR_INOBJECT_PROPERTIES) {
- new_instance_size -= map->inobject_properties() * kPointerSize;
+ new_instance_size -= map->GetInObjectProperties() * kPointerSize;
}
Handle<Map> result = RawCopy(map, new_instance_size);
if (mode != CLEAR_INOBJECT_PROPERTIES) {
- result->set_inobject_properties(map->inobject_properties());
+ result->SetInObjectProperties(map->GetInObjectProperties());
}
result->set_dictionary_map(true);
@@ -6744,11 +6549,9 @@ Handle<Map> Map::CopyDropDescriptors(Handle<Map> map) {
Handle<Map> result = RawCopy(map, map->instance_size());
// Please note instance_type and instance_size are set when allocated.
- result->set_inobject_properties(map->inobject_properties());
+ result->SetInObjectProperties(map->GetInObjectProperties());
result->set_unused_property_fields(map->unused_property_fields());
- result->set_pre_allocated_property_fields(
- map->pre_allocated_property_fields());
result->ClearCodeCache(map->GetHeap());
map->NotifyLeafMapLayoutChange();
return result;
@@ -6773,8 +6576,7 @@ Handle<Map> Map::ShareDescriptor(Handle<Map> map,
if (old_size == 0) {
descriptors = DescriptorArray::Allocate(map->GetIsolate(), 0, 1);
} else {
- int slack = SlackForArraySize(map->is_prototype_map(), old_size,
- kMaxNumberOfDescriptors);
+ int slack = SlackForArraySize(old_size, kMaxNumberOfDescriptors);
EnsureDescriptorSlack(map, slack);
descriptors = handle(map->instance_descriptors());
}
@@ -6940,13 +6742,10 @@ Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
Map* maybe_elements_transition_map = NULL;
if (flag == INSERT_TRANSITION) {
maybe_elements_transition_map = map->ElementsTransitionMap();
- DCHECK(
- maybe_elements_transition_map == NULL ||
- ((maybe_elements_transition_map->elements_kind() ==
- DICTIONARY_ELEMENTS ||
- IsExternalArrayElementsKind(
- maybe_elements_transition_map->elements_kind())) &&
- (kind == DICTIONARY_ELEMENTS || IsExternalArrayElementsKind(kind))));
+ DCHECK(maybe_elements_transition_map == NULL ||
+ (maybe_elements_transition_map->elements_kind() ==
+ DICTIONARY_ELEMENTS &&
+ kind == DICTIONARY_ELEMENTS));
DCHECK(!IsFastElementsKind(kind) ||
IsMoreGeneralElementsKindTransition(map->elements_kind(), kind));
DCHECK(kind != map->elements_kind());
@@ -7062,7 +6861,7 @@ Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
JSObject::kHeaderSize + kPointerSize * inobject_properties;
// Adjust the map with the extra inobject properties.
- copy->set_inobject_properties(inobject_properties);
+ copy->SetInObjectProperties(inobject_properties);
copy->set_unused_property_fields(inobject_properties);
copy->set_instance_size(new_instance_size);
copy->set_visitor_id(StaticVisitorBase::GetVisitorId(*copy));
@@ -7132,43 +6931,6 @@ bool DescriptorArray::CanHoldValue(int descriptor, Object* value) {
// static
-Handle<Map> Map::PrepareForDataElement(Handle<Map> map, Handle<Object> value) {
- ElementsKind kind = map->elements_kind();
- bool holey = IsHoleyElementsKind(kind);
-
- switch (kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- if (value->IsSmi()) return map;
- kind = value->IsNumber() ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
- break;
-
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- if (value->IsNumber()) return map;
- kind = FAST_ELEMENTS;
- break;
-
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
- case TYPE##_ELEMENTS:
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- return map;
- }
-
- if (holey) kind = GetHoleyElementsKind(kind);
- return Map::AsElementsKind(map, kind);
-}
-
-
-// static
Handle<Map> Map::PrepareForDataProperty(Handle<Map> map, int descriptor,
Handle<Object> value) {
// Dictionaries can store any property value.
@@ -7280,12 +7042,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
Isolate* isolate = name->GetIsolate();
// Dictionary maps can always have additional data properties.
- if (map->is_dictionary_map()) {
- // For global objects, property cells are inlined. We need to change the
- // map.
- if (map->IsGlobalObjectMap()) return Copy(map, "GlobalAccessor");
- return map;
- }
+ if (map->is_dictionary_map()) return map;
// Migrate to the newest map before transitioning to the new property.
map = Update(map);
@@ -7602,10 +7359,11 @@ void CodeCache::UpdateDefaultCache(
// Extend the code cache with some new entries (at least one). Must be a
// multiple of the entry size.
- int new_length = length + ((length >> 1)) + kCodeCacheEntrySize;
+ Isolate* isolate = cache->GetIsolate();
+ int new_length = length + (length >> 1) + kCodeCacheEntrySize;
new_length = new_length - new_length % kCodeCacheEntrySize;
DCHECK((new_length % kCodeCacheEntrySize) == 0);
- cache = FixedArray::CopySize(cache, new_length);
+ cache = isolate->factory()->CopyFixedArrayAndGrow(cache, new_length - length);
// Add the (name, code) pair to the new cache.
cache->set(length + kCodeCacheEntryNameOffset, *name);
@@ -7998,27 +7756,6 @@ MaybeHandle<FixedArray> FixedArray::UnionOfKeys(Handle<FixedArray> first,
}
-Handle<FixedArray> FixedArray::CopySize(
- Handle<FixedArray> array, int new_length, PretenureFlag pretenure) {
- Isolate* isolate = array->GetIsolate();
- if (new_length == 0) return isolate->factory()->empty_fixed_array();
- Handle<FixedArray> result =
- isolate->factory()->NewFixedArray(new_length, pretenure);
- // Copy the content
- DisallowHeapAllocation no_gc;
- int len = array->length();
- if (new_length < len) len = new_length;
- // We are taking the map from the old fixed array so the map is sure to
- // be an immortal immutable object.
- result->set_map_no_write_barrier(array->map());
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < len; i++) {
- result->set(i, array->get(i), mode);
- }
- return result;
-}
-
-
void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos, int len) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
@@ -8152,28 +7889,22 @@ Handle<WeakFixedArray> WeakFixedArray::Allocate(
DCHECK(0 <= size);
Handle<FixedArray> result =
isolate->factory()->NewUninitializedFixedArray(size + kFirstIndex);
- Handle<WeakFixedArray> casted_result = Handle<WeakFixedArray>::cast(result);
- if (initialize_from.is_null()) {
- for (int i = 0; i < result->length(); ++i) {
- result->set(i, Smi::FromInt(0));
- }
- } else {
+ int index = 0;
+ if (!initialize_from.is_null()) {
DCHECK(initialize_from->Length() <= size);
Handle<FixedArray> raw_source = Handle<FixedArray>::cast(initialize_from);
- int target_index = kFirstIndex;
- for (int source_index = kFirstIndex; source_index < raw_source->length();
- ++source_index) {
- // The act of allocating might have caused entries in the source array
- // to be cleared. Copy only what's needed.
- if (initialize_from->IsEmptySlot(source_index - kFirstIndex)) continue;
- result->set(target_index++, raw_source->get(source_index));
- }
- casted_result->set_last_used_index(target_index - 1 - kFirstIndex);
- for (; target_index < result->length(); ++target_index) {
- result->set(target_index, Smi::FromInt(0));
+ // Copy the entries without compacting, since the PrototypeInfo relies on
+ // the index of the entries not to change.
+ while (index < raw_source->length()) {
+ result->set(index, raw_source->get(index));
+ index++;
}
}
- return casted_result;
+ while (index < result->length()) {
+ result->set(index, Smi::FromInt(0));
+ index++;
+ }
+ return Handle<WeakFixedArray>::cast(result);
}
@@ -8209,9 +7940,12 @@ Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
int capacity = array->length();
bool empty = (capacity == 0);
if (capacity < kFirstIndex + length) {
- capacity = kFirstIndex + length;
- capacity = capacity + Max(capacity / 2, 2);
- array = Handle<ArrayList>::cast(FixedArray::CopySize(array, capacity));
+ Isolate* isolate = array->GetIsolate();
+ int new_capacity = kFirstIndex + length;
+ new_capacity = new_capacity + Max(new_capacity / 2, 2);
+ int grow_by = new_capacity - capacity;
+ array = Handle<ArrayList>::cast(
+ isolate->factory()->CopyFixedArrayAndGrow(array, grow_by));
if (empty) array->SetLength(0);
}
return array;
@@ -8469,13 +8203,12 @@ String::FlatContent String::GetFlatContent() {
}
-SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robust_flag,
- int offset,
- int length,
- int* length_return) {
+base::SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
+ RobustnessFlag robust_flag,
+ int offset, int length,
+ int* length_return) {
if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
- return SmartArrayPointer<char>(NULL);
+ return base::SmartArrayPointer<char>(NULL);
}
// Negative length means the to the end of the string.
if (length < 0) length = kMaxInt - offset;
@@ -8512,13 +8245,13 @@ SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
last = character;
}
result[utf8_byte_position] = 0;
- return SmartArrayPointer<char>(result);
+ return base::SmartArrayPointer<char>(result);
}
-SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robust_flag,
- int* length_return) {
+base::SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
+ RobustnessFlag robust_flag,
+ int* length_return) {
return ToCString(allow_nulls, robust_flag, 0, -1, length_return);
}
@@ -8544,9 +8277,10 @@ const uc16* String::GetTwoByteData(unsigned start) {
}
-SmartArrayPointer<uc16> String::ToWideCString(RobustnessFlag robust_flag) {
+base::SmartArrayPointer<uc16> String::ToWideCString(
+ RobustnessFlag robust_flag) {
if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
- return SmartArrayPointer<uc16>();
+ return base::SmartArrayPointer<uc16>();
}
StringCharacterStream stream(this);
@@ -8558,7 +8292,7 @@ SmartArrayPointer<uc16> String::ToWideCString(RobustnessFlag robust_flag) {
result[i++] = character;
}
result[i] = 0;
- return SmartArrayPointer<uc16>(result);
+ return base::SmartArrayPointer<uc16>(result);
}
@@ -9344,7 +9078,7 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
// that are a multiple of pointer size.
heap->CreateFillerObjectAt(start_of_string + new_size, delta);
}
- heap->AdjustLiveBytes(start_of_string, -delta, Heap::CONCURRENT_TO_SWEEPER);
+ heap->AdjustLiveBytes(*string, -delta, Heap::CONCURRENT_TO_SWEEPER);
// We are storing the new length using release store after creating a filler
// for the left-over space to avoid races with the sweeper thread.
@@ -9515,10 +9249,10 @@ bool Map::EquivalentToForTransition(Map* other) {
bool Map::EquivalentToForNormalization(Map* other,
PropertyNormalizationMode mode) {
- int properties = mode == CLEAR_INOBJECT_PROPERTIES
- ? 0 : other->inobject_properties();
+ int properties =
+ mode == CLEAR_INOBJECT_PROPERTIES ? 0 : other->GetInObjectProperties();
return CheckEquivalent(this, other) && bit_field2() == other->bit_field2() &&
- inobject_properties() == properties;
+ GetInObjectProperties() == properties;
}
@@ -9531,11 +9265,32 @@ void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
}
+bool JSFunction::Inlines(SharedFunctionInfo* candidate) {
+ DisallowHeapAllocation no_gc;
+ if (shared() == candidate) return true;
+ if (code()->kind() != Code::OPTIMIZED_FUNCTION) return false;
+ DeoptimizationInputData* const data =
+ DeoptimizationInputData::cast(code()->deoptimization_data());
+ if (data->length() == 0) return false;
+ FixedArray* const literals = data->LiteralArray();
+ int const inlined_count = data->InlinedFunctionCount()->value();
+ for (int i = 0; i < inlined_count; ++i) {
+ if (SharedFunctionInfo::cast(literals->get(i)) == candidate) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
void JSFunction::MarkForOptimization() {
Isolate* isolate = GetIsolate();
+ // Do not optimize if function contains break points.
+ if (shared()->HasDebugInfo()) return;
DCHECK(!IsOptimized());
DCHECK(shared()->allows_lazy_compilation() ||
!shared()->optimization_disabled());
+ DCHECK(!shared()->HasDebugInfo());
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCompileOptimized));
// No write barrier required, since the builtin is part of the root set.
@@ -9615,6 +9370,7 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<FixedArray> literals,
BailoutId osr_ast_id) {
Isolate* isolate = shared->GetIsolate();
+ DCHECK(!shared->SearchOptimizedCodeMap(*native_context, osr_ast_id).code);
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
DCHECK(native_context->IsNativeContext());
STATIC_ASSERT(kEntryLength == 4);
@@ -9624,20 +9380,18 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
if (value->IsSmi()) {
// No optimized code map.
DCHECK_EQ(0, Smi::cast(*value)->value());
- new_code_map = isolate->factory()->NewFixedArray(kInitialLength);
+ new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
old_length = kEntriesStart;
} else {
- // Copy old map and append one new entry.
+ // Copy old optimized code map and append one new entry.
Handle<FixedArray> old_code_map = Handle<FixedArray>::cast(value);
- DCHECK(!shared->SearchOptimizedCodeMap(*native_context, osr_ast_id).code);
+ new_code_map = isolate->factory()->CopyFixedArrayAndGrow(
+ old_code_map, kEntryLength, TENURED);
old_length = old_code_map->length();
- new_code_map = FixedArray::CopySize(
- old_code_map, old_length + kEntryLength);
- // Zap the old map for the sake of the heap verifier.
- if (Heap::ShouldZapGarbage()) {
- Object** data = old_code_map->data_start();
- MemsetPointer(data, isolate->heap()->the_hole_value(), old_length);
- }
+ // Zap the old map to avoid any stale entries. Note that this is required
+ // for correctness because entries are being treated weakly by the GC.
+ MemsetPointer(old_code_map->data_start(), isolate->heap()->the_hole_value(),
+ old_length);
}
new_code_map->set(old_length + kContextOffset, *native_context);
new_code_map->set(old_length + kCachedCodeOffset, *code);
@@ -9724,7 +9478,10 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
// Always trim even when array is cleared because of heap verifier.
GetHeap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
length - dst);
- if (code_map->length() == kEntriesStart) ClearOptimizedCodeMap();
+ if (code_map->length() == kEntriesStart &&
+ code_map->get(kSharedCodeIndex)->IsUndefined()) {
+ ClearOptimizedCodeMap();
+ }
}
}
@@ -9736,7 +9493,8 @@ void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
// Always trim even when array is cleared because of heap verifier.
GetHeap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(code_map,
shrink_by);
- if (code_map->length() == kEntriesStart) {
+ if (code_map->length() == kEntriesStart &&
+ code_map->get(kSharedCodeIndex)->IsUndefined()) {
ClearOptimizedCodeMap();
}
}
@@ -9752,7 +9510,7 @@ static void GetMinInobjectSlack(Map* map, void* data) {
static void ShrinkInstanceSize(Map* map, void* data) {
int slack = *reinterpret_cast<int*>(data);
- map->set_inobject_properties(map->inobject_properties() - slack);
+ map->SetInObjectProperties(map->GetInObjectProperties() - slack);
map->set_unused_property_fields(map->unused_property_fields() - slack);
map->set_instance_size(map->instance_size() - slack * kPointerSize);
@@ -10234,7 +9992,7 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
} else {
prototype = isolate->factory()->NewFunctionPrototype(function);
}
- map->set_inobject_properties(in_object_properties);
+ map->SetInObjectProperties(in_object_properties);
map->set_unused_property_fields(in_object_properties);
DCHECK(map->has_fast_object_elements());
@@ -10253,7 +10011,7 @@ void JSFunction::SetInstanceClassName(String* name) {
void JSFunction::PrintName(FILE* out) {
- SmartArrayPointer<char> name = shared()->DebugName()->ToCString();
+ base::SmartArrayPointer<char> name = shared()->DebugName()->ToCString();
PrintF(out, "%s", name.get());
}
@@ -10304,15 +10062,16 @@ Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
}
-void Oddball::Initialize(Isolate* isolate,
- Handle<Oddball> oddball,
- const char* to_string,
- Handle<Object> to_number,
- byte kind) {
+void Oddball::Initialize(Isolate* isolate, Handle<Oddball> oddball,
+ const char* to_string, Handle<Object> to_number,
+ const char* type_of, byte kind) {
Handle<String> internalized_to_string =
isolate->factory()->InternalizeUtf8String(to_string);
- oddball->set_to_string(*internalized_to_string);
+ Handle<String> internalized_type_of =
+ isolate->factory()->InternalizeUtf8String(type_of);
oddball->set_to_number(*to_number);
+ oddball->set_to_string(*internalized_to_string);
+ oddball->set_type_of(*internalized_type_of);
oddball->set_kind(kind);
}
@@ -10659,8 +10418,8 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
if (lit->dont_optimize_reason() != kNoReason) {
shared_info->DisableOptimization(lit->dont_optimize_reason());
}
- shared_info->set_dont_crankshaft(
- lit->flags()->Contains(AstPropertiesFlag::kDontCrankshaft));
+ shared_info->set_dont_crankshaft(lit->flags() &
+ AstProperties::kDontCrankshaft);
shared_info->set_kind(lit->kind());
shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
shared_info->set_asm_function(lit->scope()->asm_function());
@@ -10798,11 +10557,9 @@ void ObjectVisitor::VisitCell(RelocInfo* rinfo) {
void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
- DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence());
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
Object* old_target = target;
VisitPointer(&target);
CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
@@ -10846,7 +10603,7 @@ void Code::InvalidateEmbeddedObjects() {
void Code::Relocate(intptr_t delta) {
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
- it.rinfo()->apply(delta, SKIP_ICACHE_FLUSH);
+ it.rinfo()->apply(delta);
}
CpuFeatures::FlushICache(instruction_start(), instruction_size());
}
@@ -10899,7 +10656,7 @@ void Code::CopyFrom(const CodeDesc& desc) {
Code* code = Code::cast(*p);
it.rinfo()->set_code_age_stub(code, SKIP_ICACHE_FLUSH);
} else {
- it.rinfo()->apply(delta, SKIP_ICACHE_FLUSH);
+ it.rinfo()->apply(delta);
}
}
CpuFeatures::FlushICache(instruction_start(), instruction_size());
@@ -11826,6 +11583,26 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
#endif // ENABLE_DISASSEMBLER
+void BytecodeArray::Disassemble(std::ostream& os) {
+ os << "Frame size " << frame_size() << "\n";
+ Vector<char> buf = Vector<char>::New(50);
+
+ const uint8_t* first_bytecode_address = GetFirstBytecodeAddress();
+ int bytecode_size = 0;
+ for (int i = 0; i < this->length(); i += bytecode_size) {
+ const uint8_t* bytecode_start = &first_bytecode_address[i];
+ interpreter::Bytecode bytecode =
+ interpreter::Bytecodes::FromByte(bytecode_start[0]);
+ bytecode_size = interpreter::Bytecodes::Size(bytecode);
+
+ SNPrintF(buf, "%p", bytecode_start);
+ os << buf.start() << " : ";
+ interpreter::Bytecodes::Decode(os, bytecode_start);
+ os << "\n";
+ }
+}
+
+
// static
void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
DCHECK(capacity >= 0);
@@ -12019,9 +11796,10 @@ Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
Handle<DependentCode> DependentCode::EnsureSpace(
Handle<DependentCode> entries) {
+ Isolate* isolate = entries->GetIsolate();
if (entries->length() == 0) {
entries = Handle<DependentCode>::cast(
- FixedArray::CopySize(entries, kCodesStartIndex + 1, TENURED));
+ isolate->factory()->NewFixedArray(kCodesStartIndex + 1, TENURED));
for (int g = 0; g < kGroupCount; g++) {
entries->set_number_of_entries(static_cast<DependencyGroup>(g), 0);
}
@@ -12031,8 +11809,9 @@ Handle<DependentCode> DependentCode::EnsureSpace(
GroupStartIndexes starts(*entries);
int capacity =
kCodesStartIndex + DependentCode::Grow(starts.number_of_entries());
+ int grow_by = capacity - entries->length();
return Handle<DependentCode>::cast(
- FixedArray::CopySize(entries, capacity, TENURED));
+ isolate->factory()->CopyFixedArrayAndGrow(entries, grow_by, TENURED));
}
@@ -12365,16 +12144,6 @@ void JSObject::ValidateElements(Handle<JSObject> object) {
}
-// static
-MaybeHandle<Object> JSReceiver::SetElement(Handle<JSReceiver> object,
- uint32_t index, Handle<Object> value,
- LanguageMode language_mode) {
- Isolate* isolate = object->GetIsolate();
- LookupIterator it(isolate, object, index);
- return SetProperty(&it, value, language_mode, MAY_BE_STORE_FROM_KEYED);
-}
-
-
static bool ShouldConvertToSlowElements(JSObject* object, uint32_t capacity,
uint32_t index,
uint32_t* new_capacity) {
@@ -12395,9 +12164,7 @@ static bool ShouldConvertToSlowElements(JSObject* object, uint32_t capacity,
// If the fast-case backing storage takes up roughly three times as
// much space (in machine words) as a dictionary backing storage
// would, the object should have slow elements.
- int old_capacity = 0;
- int used_elements = 0;
- object->GetElementsCapacityAndUsage(&old_capacity, &used_elements);
+ int used_elements = object->GetFastElementsUsage();
int dictionary_size = SeededNumberDictionary::ComputeCapacity(used_elements) *
SeededNumberDictionary::kEntrySize;
return 3 * static_cast<uint32_t>(dictionary_size) <= *new_capacity;
@@ -12764,79 +12531,47 @@ MaybeHandle<Object> JSArray::ReadOnlyLengthError(Handle<JSArray> array) {
}
-void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
- *capacity = 0;
- *used = 0;
+template <typename BackingStore>
+static int FastHoleyElementsUsage(JSObject* object, BackingStore* store) {
+ int limit = object->IsJSArray()
+ ? Smi::cast(JSArray::cast(object)->length())->value()
+ : store->length();
+ int used = 0;
+ for (int i = 0; i < limit; ++i) {
+ if (!store->is_the_hole(i)) ++used;
+ }
+ return used;
+}
+
- FixedArrayBase* backing_store_base = FixedArrayBase::cast(elements());
- FixedArray* backing_store = NULL;
+int JSObject::GetFastElementsUsage() {
+ FixedArrayBase* store = elements();
switch (GetElementsKind()) {
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- backing_store_base =
- FixedArray::cast(FixedArray::cast(backing_store_base)->get(1));
- backing_store = FixedArray::cast(backing_store_base);
- if (backing_store->IsDictionary()) {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(backing_store);
- *capacity = dictionary->Capacity();
- *used = dictionary->NumberOfElements();
- break;
- }
- // Fall through.
case FAST_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- if (IsJSArray()) {
- *capacity = backing_store_base->length();
- *used = Smi::cast(JSArray::cast(this)->length())->value();
- break;
- }
- // Fall through if packing is not guaranteed.
+ // Only JSArray have packed elements.
+ return Smi::cast(JSArray::cast(this)->length())->value();
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ store = FixedArray::cast(FixedArray::cast(store)->get(1));
+ // Fall through.
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
- backing_store = FixedArray::cast(backing_store_base);
- *capacity = backing_store->length();
- for (int i = 0; i < *capacity; ++i) {
- if (!backing_store->get(i)->IsTheHole()) ++(*used);
- }
- break;
- case DICTIONARY_ELEMENTS: {
- SeededNumberDictionary* dictionary = element_dictionary();
- *capacity = dictionary->Capacity();
- *used = dictionary->NumberOfElements();
- break;
- }
- case FAST_DOUBLE_ELEMENTS:
- if (IsJSArray()) {
- *capacity = backing_store_base->length();
- *used = Smi::cast(JSArray::cast(this)->length())->value();
- break;
- }
- // Fall through if packing is not guaranteed.
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- *capacity = elements()->length();
- if (*capacity == 0) break;
- FixedDoubleArray * elms = FixedDoubleArray::cast(elements());
- for (int i = 0; i < *capacity; i++) {
- if (!elms->is_the_hole(i)) ++(*used);
- }
- break;
- }
+ return FastHoleyElementsUsage(this, FixedArray::cast(store));
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ if (elements()->length() == 0) return 0;
+ return FastHoleyElementsUsage(this, FixedDoubleArray::cast(store));
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
case TYPE##_ELEMENTS: \
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
- {
- // External arrays are considered 100% used.
- FixedArrayBase* external_array = FixedArrayBase::cast(elements());
- *capacity = external_array->length();
- *used = external_array->length();
- break;
- }
+ UNREACHABLE();
}
+ return 0;
}
@@ -13163,10 +12898,11 @@ void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
// Fill in the names of own properties into the supplied storage. The main
// purpose of this function is to provide reflection information for the object
// mirrors.
-void JSObject::GetOwnPropertyNames(
- FixedArray* storage, int index, PropertyAttributes filter) {
+int JSObject::GetOwnPropertyNames(FixedArray* storage, int index,
+ PropertyAttributes filter) {
DCHECK(storage->length() >= (NumberOfOwnProperties(filter) - index));
if (HasFastProperties()) {
+ int start_index = index;
int real_size = map()->NumberOfOwnDescriptors();
DescriptorArray* descs = map()->instance_descriptors();
for (int i = 0; i < real_size; i++) {
@@ -13175,12 +12911,13 @@ void JSObject::GetOwnPropertyNames(
storage->set(index++, descs->GetKey(i));
}
}
+ return index - start_index;
} else if (IsGlobalObject()) {
- global_dictionary()->CopyKeysTo(storage, index, filter,
- GlobalDictionary::UNSORTED);
+ return global_dictionary()->CopyKeysTo(storage, index, filter,
+ GlobalDictionary::UNSORTED);
} else {
- property_dictionary()->CopyKeysTo(storage, index, filter,
- NameDictionary::UNSORTED);
+ return property_dictionary()->CopyKeysTo(storage, index, filter,
+ NameDictionary::UNSORTED);
}
}
@@ -13207,6 +12944,23 @@ int JSObject::NumberOfEnumElements() {
int JSObject::GetOwnElementKeys(FixedArray* storage,
PropertyAttributes filter) {
int counter = 0;
+
+ // If this is a String wrapper, add the string indices first,
+ // as they're guaranteed to preced the elements in numerical order
+ // and ascending order is required by ECMA-262, 6th, 9.1.12.
+ if (IsJSValue()) {
+ Object* val = JSValue::cast(this)->value();
+ if (val->IsString()) {
+ String* str = String::cast(val);
+ if (storage) {
+ for (int i = 0; i < str->length(); i++) {
+ storage->set(counter + i, Smi::FromInt(i));
+ }
+ }
+ counter += str->length();
+ }
+ }
+
switch (GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
@@ -13244,7 +12998,6 @@ int JSObject::GetOwnElementKeys(FixedArray* storage,
}
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
case TYPE##_ELEMENTS: \
TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -13263,7 +13016,7 @@ int JSObject::GetOwnElementKeys(FixedArray* storage,
case DICTIONARY_ELEMENTS: {
if (storage != NULL) {
- element_dictionary()->CopyKeysTo(storage, filter,
+ element_dictionary()->CopyKeysTo(storage, counter, filter,
SeededNumberDictionary::SORTED);
}
counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
@@ -13280,7 +13033,7 @@ int JSObject::GetOwnElementKeys(FixedArray* storage,
SeededNumberDictionary* dictionary =
SeededNumberDictionary::cast(arguments);
if (storage != NULL) {
- dictionary->CopyKeysTo(storage, filter,
+ dictionary->CopyKeysTo(storage, counter, filter,
SeededNumberDictionary::UNSORTED);
}
counter += dictionary->NumberOfElementsFilterAttributes(filter);
@@ -13313,18 +13066,6 @@ int JSObject::GetOwnElementKeys(FixedArray* storage,
}
}
- if (this->IsJSValue()) {
- Object* val = JSValue::cast(this)->value();
- if (val->IsString()) {
- String* str = String::cast(val);
- if (storage) {
- for (int i = 0; i < str->length(); i++) {
- storage->set(counter + i, Smi::FromInt(i));
- }
- }
- counter += str->length();
- }
- }
DCHECK(!storage || storage->length() == counter);
return counter;
}
@@ -13580,9 +13321,7 @@ Handle<Derived> HashTable<Derived, Shape, Key>::New(
int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY)
? at_least_space_for
- : isolate->creating_default_snapshot()
- ? ComputeCapacityForSerialization(at_least_space_for)
- : ComputeCapacity(at_least_space_for);
+ : ComputeCapacity(at_least_space_for);
if (capacity > HashTable::kMaxCapacity) {
v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true);
}
@@ -14004,7 +13743,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
return bailout;
} else {
Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
- new_dict, pos, value, details);
+ new_dict, pos, value, details, object->map()->is_prototype_map());
DCHECK(result.is_identical_to(new_dict));
USE(result);
pos++;
@@ -14015,7 +13754,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
return bailout;
} else {
Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
- new_dict, key, value, details);
+ new_dict, key, value, details, object->map()->is_prototype_map());
DCHECK(result.is_identical_to(new_dict));
USE(result);
}
@@ -14031,7 +13770,8 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
}
HandleScope scope(isolate);
Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
- new_dict, pos, isolate->factory()->undefined_value(), no_details);
+ new_dict, pos, isolate->factory()->undefined_value(), no_details,
+ object->map()->is_prototype_map());
DCHECK(result.is_identical_to(new_dict));
USE(result);
pos++;
@@ -14078,8 +13818,7 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
JSObject::ValidateElements(object);
JSObject::SetMapAndElements(object, new_map, fast_elements);
- } else if (object->HasExternalArrayElements() ||
- object->HasFixedTypedArrayElements()) {
+ } else if (object->HasFixedTypedArrayElements()) {
// Typed arrays cannot have holes or undefined elements.
return handle(Smi::FromInt(
FixedArrayBase::cast(object->elements())->length()), isolate);
@@ -14182,7 +13921,6 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
ExternalArrayType JSTypedArray::type() {
switch (elements()->map()->instance_type()) {
#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ARRAY_TYPE: \
case FIXED_##TYPE##_ARRAY_TYPE: \
return kExternal##Type##Array;
@@ -14198,9 +13936,9 @@ ExternalArrayType JSTypedArray::type() {
size_t JSTypedArray::element_size() {
switch (elements()->map()->instance_type()) {
-#define INSTANCE_TYPE_TO_ELEMENT_SIZE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ARRAY_TYPE: \
- return size;
+#define INSTANCE_TYPE_TO_ELEMENT_SIZE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ return size;
TYPED_ARRAYS(INSTANCE_TYPE_TO_ELEMENT_SIZE)
#undef INSTANCE_TYPE_TO_ELEMENT_SIZE
@@ -14218,131 +13956,6 @@ void FixedArray::SetValue(uint32_t index, Object* value) { set(index, value); }
void FixedDoubleArray::SetValue(uint32_t index, Object* value) {
set(index, value->Number());
}
-
-
-void ExternalUint8ClampedArray::SetValue(uint32_t index, Object* value) {
- uint8_t clamped_value = 0;
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- if (int_value < 0) {
- clamped_value = 0;
- } else if (int_value > 255) {
- clamped_value = 255;
- } else {
- clamped_value = static_cast<uint8_t>(int_value);
- }
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- if (!(double_value > 0)) {
- // NaN and less than zero clamp to zero.
- clamped_value = 0;
- } else if (double_value > 255) {
- // Greater than 255 clamp to 255.
- clamped_value = 255;
- } else {
- // Other doubles are rounded to the nearest integer.
- clamped_value = static_cast<uint8_t>(lrint(double_value));
- }
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
- }
- set(index, clamped_value);
-}
-
-
-template <typename ExternalArrayClass, typename ValueType>
-static void ExternalArrayIntSetter(ExternalArrayClass* receiver, uint32_t index,
- Object* value) {
- ValueType cast_value = 0;
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- cast_value = static_cast<ValueType>(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- cast_value = static_cast<ValueType>(DoubleToInt32(double_value));
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
- }
- receiver->set(index, cast_value);
-}
-
-
-void ExternalInt8Array::SetValue(uint32_t index, Object* value) {
- ExternalArrayIntSetter<ExternalInt8Array, int8_t>(this, index, value);
-}
-
-
-void ExternalUint8Array::SetValue(uint32_t index, Object* value) {
- ExternalArrayIntSetter<ExternalUint8Array, uint8_t>(this, index, value);
-}
-
-
-void ExternalInt16Array::SetValue(uint32_t index, Object* value) {
- ExternalArrayIntSetter<ExternalInt16Array, int16_t>(this, index, value);
-}
-
-
-void ExternalUint16Array::SetValue(uint32_t index, Object* value) {
- ExternalArrayIntSetter<ExternalUint16Array, uint16_t>(this, index, value);
-}
-
-
-void ExternalInt32Array::SetValue(uint32_t index, Object* value) {
- ExternalArrayIntSetter<ExternalInt32Array, int32_t>(this, index, value);
-}
-
-
-void ExternalUint32Array::SetValue(uint32_t index, Object* value) {
- uint32_t cast_value = 0;
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- cast_value = static_cast<uint32_t>(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- cast_value = static_cast<uint32_t>(DoubleToUint32(double_value));
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
- }
- set(index, cast_value);
-}
-
-
-void ExternalFloat32Array::SetValue(uint32_t index, Object* value) {
- float cast_value = std::numeric_limits<float>::quiet_NaN();
- if (value->IsSmi()) {
- int int_value = Smi::cast(value)->value();
- cast_value = static_cast<float>(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- cast_value = static_cast<float>(double_value);
- } else {
- // Clamp undefined to NaN (default). All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
- }
- set(index, cast_value);
-}
-
-
-void ExternalFloat64Array::SetValue(uint32_t index, Object* value) {
- double double_value = std::numeric_limits<double>::quiet_NaN();
- if (value->IsNumber()) {
- double_value = value->Number();
- } else {
- // Clamp undefined to NaN (default). All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
- }
- set(index, double_value);
-}
-
-
void GlobalObject::InvalidatePropertyCell(Handle<GlobalObject> global,
Handle<Name> name) {
DCHECK(!global->HasFastProperties());
@@ -14897,7 +14510,8 @@ void Dictionary<Derived, Shape, Key>::AddEntry(
}
-void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key) {
+void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key,
+ bool used_as_prototype) {
DisallowHeapAllocation no_allocation;
// If the dictionary requires slow elements an element has already
// been added at a high index.
@@ -14905,6 +14519,10 @@ void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key) {
// Check if this index is high enough that we should require slow
// elements.
if (key > kRequiresSlowElementsLimit) {
+ if (used_as_prototype) {
+ // TODO(verwaest): Remove this hack.
+ GetHeap()->ClearAllICsByKind(Code::KEYED_STORE_IC);
+ }
set_requires_slow_elements();
return;
}
@@ -14918,11 +14536,9 @@ void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key) {
Handle<SeededNumberDictionary> SeededNumberDictionary::AddNumberEntry(
- Handle<SeededNumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value,
- PropertyDetails details) {
- dictionary->UpdateMaxNumberKey(key);
+ Handle<SeededNumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value, PropertyDetails details, bool used_as_prototype) {
+ dictionary->UpdateMaxNumberKey(key, used_as_prototype);
SLOW_DCHECK(dictionary->FindEntry(key) == kNotFound);
return Add(dictionary, key, value, details);
}
@@ -14938,10 +14554,9 @@ Handle<UnseededNumberDictionary> UnseededNumberDictionary::AddNumberEntry(
Handle<SeededNumberDictionary> SeededNumberDictionary::AtNumberPut(
- Handle<SeededNumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value) {
- dictionary->UpdateMaxNumberKey(key);
+ Handle<SeededNumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value, bool used_as_prototype) {
+ dictionary->UpdateMaxNumberKey(key, used_as_prototype);
return AtPut(dictionary, key, value);
}
@@ -14955,13 +14570,11 @@ Handle<UnseededNumberDictionary> UnseededNumberDictionary::AtNumberPut(
Handle<SeededNumberDictionary> SeededNumberDictionary::Set(
- Handle<SeededNumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value,
- PropertyDetails details) {
+ Handle<SeededNumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value, PropertyDetails details, bool used_as_prototype) {
int entry = dictionary->FindEntry(key);
if (entry == kNotFound) {
- return AddNumberEntry(dictionary, key, value, details);
+ return AddNumberEntry(dictionary, key, value, details, used_as_prototype);
}
// Preserve enumeration index.
details = details.set_index(dictionary->DetailsAt(entry).dictionary_index());
@@ -15020,29 +14633,6 @@ bool Dictionary<Derived, Shape, Key>::HasComplexElements() {
}
-template <typename Derived, typename Shape, typename Key>
-void Dictionary<Derived, Shape, Key>::CopyKeysTo(
- FixedArray* storage, PropertyAttributes filter,
- typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
- DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter));
- int capacity = this->Capacity();
- int index = 0;
- for (int i = 0; i < capacity; i++) {
- Object* k = this->KeyAt(i);
- if (this->IsKey(k) && !FilterKey(k, filter)) {
- if (this->IsDeleted(i)) continue;
- PropertyDetails details = this->DetailsAt(i);
- PropertyAttributes attr = details.attributes();
- if ((attr & filter) == 0) storage->set(index++, k);
- }
- }
- if (sort_mode == Dictionary::SORTED) {
- storage->SortPairs(storage, index);
- }
- DCHECK(storage->length() >= index);
-}
-
-
template <typename Dictionary>
struct EnumIndexComparator {
explicit EnumIndexComparator(Dictionary* dict) : dict(dict) {}
@@ -15082,10 +14672,11 @@ void Dictionary<Derived, Shape, Key>::CopyEnumKeysTo(FixedArray* storage) {
template <typename Derived, typename Shape, typename Key>
-void Dictionary<Derived, Shape, Key>::CopyKeysTo(
+int Dictionary<Derived, Shape, Key>::CopyKeysTo(
FixedArray* storage, int index, PropertyAttributes filter,
typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter));
+ int start_index = index;
int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
@@ -15100,6 +14691,7 @@ void Dictionary<Derived, Shape, Key>::CopyKeysTo(
storage->SortPairs(storage, index);
}
DCHECK(storage->length() >= index);
+ return index - start_index;
}
@@ -15973,21 +15565,6 @@ void JSArrayBuffer::Neuter() {
}
-static ElementsKind FixedToExternalElementsKind(ElementsKind elements_kind) {
- switch (elements_kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: return EXTERNAL_##TYPE##_ELEMENTS;
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- default:
- UNREACHABLE();
- return FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND;
- }
-}
-
-
Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
Handle<JSTypedArray> typed_array) {
@@ -15996,10 +15573,6 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
DCHECK(IsFixedTypedArrayElementsKind(map->elements_kind()));
- Handle<Map> new_map = Map::TransitionElementsTo(
- map,
- FixedToExternalElementsKind(map->elements_kind()));
-
Handle<FixedTypedArrayBase> fixed_typed_array(
FixedTypedArrayBase::cast(typed_array->elements()));
@@ -16016,21 +15589,23 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
memcpy(buffer->backing_store(),
fixed_typed_array->DataPtr(),
fixed_typed_array->DataSize());
- Handle<ExternalArray> new_elements =
- isolate->factory()->NewExternalArray(
+ Handle<FixedTypedArrayBase> new_elements =
+ isolate->factory()->NewFixedTypedArrayWithExternalPointer(
fixed_typed_array->length(), typed_array->type(),
static_cast<uint8_t*>(buffer->backing_store()));
- JSObject::SetMapAndElements(typed_array, new_map, new_elements);
+ typed_array->set_elements(*new_elements);
return buffer;
}
Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
- if (IsExternalArrayElementsKind(map()->elements_kind())) {
- Handle<Object> result(buffer(), GetIsolate());
- return Handle<JSArrayBuffer>::cast(result);
+ Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(buffer()),
+ GetIsolate());
+ if (array_buffer->was_neutered() ||
+ array_buffer->backing_store() != nullptr) {
+ return array_buffer;
}
Handle<JSTypedArray> self(this);
return MaterializeArrayBuffer(self);
@@ -16058,6 +15633,8 @@ Handle<PropertyCell> PropertyCell::InvalidateEntry(
} else {
cell->set_value(isolate->heap()->the_hole_value());
}
+ details = details.set_cell_type(PropertyCellType::kInvalidated);
+ cell->set_property_details(details);
cell->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
return new_cell;
@@ -16152,8 +15729,9 @@ void PropertyCell::UpdateCell(Handle<GlobalDictionary> dictionary, int entry,
cell->set_value(*value);
// Deopt when transitioning from a constant type.
- if (!invalidate && (old_type != new_type)) {
- auto isolate = dictionary->GetIsolate();
+ if (!invalidate && (old_type != new_type ||
+ original_details.IsReadOnly() != details.IsReadOnly())) {
+ Isolate* isolate = dictionary->GetIsolate();
cell->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
}
@@ -16170,5 +15748,6 @@ void PropertyCell::SetValueWithInvalidation(Handle<PropertyCell> cell,
isolate, DependentCode::kPropertyCellChangedGroup);
}
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 83e103db51..8ca9428710 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -11,6 +11,7 @@
#include "src/assert-scope.h"
#include "src/bailout-reason.h"
#include "src/base/bits.h"
+#include "src/base/smart-pointers.h"
#include "src/builtins.h"
#include "src/checks.h"
#include "src/elements-kind.h"
@@ -18,7 +19,6 @@
#include "src/flags.h"
#include "src/list.h"
#include "src/property-details.h"
-#include "src/smart-pointers.h"
#include "src/unicode-inl.h"
#include "src/unicode-decoder.h"
#include "src/zone.h"
@@ -73,6 +73,7 @@
// - JSFunctionProxy
// - FixedArrayBase
// - ByteArray
+// - BytecodeArray
// - FixedArray
// - DescriptorArray
// - HashTable
@@ -86,21 +87,11 @@
// - OrderedHashMap
// - Context
// - TypeFeedbackVector
-// - JSFunctionResultCache
// - ScopeInfo
// - TransitionArray
// - ScriptContextTable
// - WeakFixedArray
// - FixedDoubleArray
-// - ExternalArray
-// - ExternalUint8ClampedArray
-// - ExternalInt8Array
-// - ExternalUint8Array
-// - ExternalInt16Array
-// - ExternalUint16Array
-// - ExternalInt32Array
-// - ExternalUint32Array
-// - ExternalFloat32Array
// - Name
// - String
// - SeqString
@@ -121,7 +112,14 @@
// - ExternalTwoByteInternalizedString
// - Symbol
// - HeapNumber
-// - Float32x4
+// - Simd128Value
+// - Float32x4
+// - Int32x4
+// - Bool32x4
+// - Int16x8
+// - Bool16x8
+// - Int8x16
+// - Bool8x16
// - Cell
// - PropertyCell
// - Code
@@ -175,10 +173,7 @@ enum KeyedAccessStoreMode {
};
-enum ContextualMode {
- NOT_CONTEXTUAL,
- CONTEXTUAL
-};
+enum TypeofMode { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
enum MutableMode {
@@ -290,12 +285,6 @@ enum TransitionFlag {
};
-enum DebugExtraICState {
- DEBUG_BREAK,
- DEBUG_PREPARE_STEP_IN
-};
-
-
// Indicates whether the transition is simple: the target map of the transition
// either extends the current map with a new property, or it modifies the
// property that was added last to the current map.
@@ -386,6 +375,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
\
V(SYMBOL_TYPE) \
+ V(SIMD128_VALUE_TYPE) \
\
V(MAP_TYPE) \
V(CODE_TYPE) \
@@ -395,22 +385,10 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
\
V(HEAP_NUMBER_TYPE) \
V(MUTABLE_HEAP_NUMBER_TYPE) \
- V(FLOAT32X4_TYPE) \
V(FOREIGN_TYPE) \
V(BYTE_ARRAY_TYPE) \
+ V(BYTECODE_ARRAY_TYPE) \
V(FREE_SPACE_TYPE) \
- /* Note: the order of these external array */ \
- /* types is relied upon in */ \
- /* Object::IsExternalArray(). */ \
- V(EXTERNAL_INT8_ARRAY_TYPE) \
- V(EXTERNAL_UINT8_ARRAY_TYPE) \
- V(EXTERNAL_INT16_ARRAY_TYPE) \
- V(EXTERNAL_UINT16_ARRAY_TYPE) \
- V(EXTERNAL_INT32_ARRAY_TYPE) \
- V(EXTERNAL_UINT32_ARRAY_TYPE) \
- V(EXTERNAL_FLOAT32_ARRAY_TYPE) \
- V(EXTERNAL_FLOAT64_ARRAY_TYPE) \
- V(EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE) \
\
V(FIXED_INT8_ARRAY_TYPE) \
V(FIXED_UINT8_ARRAY_TYPE) \
@@ -634,8 +612,8 @@ static inline bool IsShortcutCandidate(int type) {
enum InstanceType {
// String types.
- INTERNALIZED_STRING_TYPE =
- kTwoByteStringTag | kSeqStringTag | kInternalizedTag,
+ INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag |
+ kInternalizedTag, // FIRST_PRIMITIVE_TYPE
ONE_BYTE_INTERNALIZED_STRING_TYPE =
kOneByteStringTag | kSeqStringTag | kInternalizedTag,
EXTERNAL_INTERNALIZED_STRING_TYPE =
@@ -682,29 +660,23 @@ enum InstanceType {
// Non-string names
SYMBOL_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
+ // Other primitives (cannot contain non-map-word pointers to heap objects).
+ HEAP_NUMBER_TYPE,
+ SIMD128_VALUE_TYPE,
+ ODDBALL_TYPE, // LAST_PRIMITIVE_TYPE
+
// Objects allocated in their own spaces (never in new space).
MAP_TYPE,
CODE_TYPE,
- ODDBALL_TYPE,
// "Data", objects that cannot contain non-map-word pointers to heap
// objects.
- HEAP_NUMBER_TYPE,
MUTABLE_HEAP_NUMBER_TYPE,
- FLOAT32X4_TYPE, // FIRST_SIMD_TYPE, LAST_SIMD_TYPE
FOREIGN_TYPE,
BYTE_ARRAY_TYPE,
+ BYTECODE_ARRAY_TYPE,
FREE_SPACE_TYPE,
- EXTERNAL_INT8_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
- EXTERNAL_UINT8_ARRAY_TYPE,
- EXTERNAL_INT16_ARRAY_TYPE,
- EXTERNAL_UINT16_ARRAY_TYPE,
- EXTERNAL_INT32_ARRAY_TYPE,
- EXTERNAL_UINT32_ARRAY_TYPE,
- EXTERNAL_FLOAT32_ARRAY_TYPE,
- EXTERNAL_FLOAT64_ARRAY_TYPE,
- EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
- FIXED_INT8_ARRAY_TYPE, // FIRST_FIXED_TYPED_ARRAY_TYPE
+ FIXED_INT8_ARRAY_TYPE, // FIRST_FIXED_TYPED_ARRAY_TYPE
FIXED_UINT8_ARRAY_TYPE,
FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE,
@@ -783,12 +755,8 @@ enum InstanceType {
FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE,
LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE,
FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
- // Boundaries for testing for a SIMD type.
- FIRST_SIMD_TYPE = FLOAT32X4_TYPE,
- LAST_SIMD_TYPE = FLOAT32X4_TYPE,
- // Boundaries for testing for an external array.
- FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_INT8_ARRAY_TYPE,
- LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE,
+ FIRST_PRIMITIVE_TYPE = FIRST_NAME_TYPE,
+ LAST_PRIMITIVE_TYPE = ODDBALL_TYPE,
// Boundaries for testing for a fixed typed array.
FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE,
LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_UINT8_CLAMPED_ARRAY_TYPE,
@@ -818,9 +786,6 @@ enum InstanceType {
NUM_OF_CALLABLE_SPEC_OBJECT_TYPES = 2
};
-const int kExternalArrayTypeCount =
- LAST_EXTERNAL_ARRAY_TYPE - FIRST_EXTERNAL_ARRAY_TYPE + 1;
-
STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
@@ -924,6 +889,14 @@ template <class C> inline bool Is(Object* obj);
#define HEAP_OBJECT_TYPE_LIST(V) \
V(HeapNumber) \
V(MutableHeapNumber) \
+ V(Simd128Value) \
+ V(Float32x4) \
+ V(Int32x4) \
+ V(Bool32x4) \
+ V(Int16x8) \
+ V(Bool16x8) \
+ V(Int8x16) \
+ V(Bool8x16) \
V(Name) \
V(UniqueName) \
V(String) \
@@ -938,16 +911,6 @@ template <class C> inline bool Is(Object* obj);
V(InternalizedString) \
V(Symbol) \
\
- V(ExternalArray) \
- V(ExternalInt8Array) \
- V(ExternalUint8Array) \
- V(ExternalInt16Array) \
- V(ExternalUint16Array) \
- V(ExternalInt32Array) \
- V(ExternalUint32Array) \
- V(ExternalFloat32Array) \
- V(ExternalFloat64Array) \
- V(ExternalUint8ClampedArray) \
V(FixedTypedArrayBase) \
V(FixedUint8Array) \
V(FixedInt8Array) \
@@ -958,8 +921,8 @@ template <class C> inline bool Is(Object* obj);
V(FixedFloat32Array) \
V(FixedFloat64Array) \
V(FixedUint8ClampedArray) \
- V(Float32x4) \
V(ByteArray) \
+ V(BytecodeArray) \
V(FreeSpace) \
V(JSReceiver) \
V(JSObject) \
@@ -1011,7 +974,6 @@ template <class C> inline bool Is(Object* obj);
V(HashTable) \
V(Dictionary) \
V(StringTable) \
- V(JSFunctionResultCache) \
V(NormalizedMapCache) \
V(CompilationCacheTable) \
V(CodeCacheHashTable) \
@@ -1097,40 +1059,11 @@ class Object {
bool ToInt32(int32_t* value);
bool ToUint32(uint32_t* value);
- inline Representation OptimalRepresentation() {
- if (!FLAG_track_fields) return Representation::Tagged();
- if (IsSmi()) {
- return Representation::Smi();
- } else if (FLAG_track_double_fields && IsHeapNumber()) {
- return Representation::Double();
- } else if (FLAG_track_computed_fields && IsUninitialized()) {
- return Representation::None();
- } else if (FLAG_track_heap_object_fields) {
- DCHECK(IsHeapObject());
- return Representation::HeapObject();
- } else {
- return Representation::Tagged();
- }
- }
+ inline Representation OptimalRepresentation();
- inline ElementsKind OptimalElementsKind() {
- if (IsSmi()) return FAST_SMI_ELEMENTS;
- if (IsNumber()) return FAST_DOUBLE_ELEMENTS;
- return FAST_ELEMENTS;
- }
+ inline ElementsKind OptimalElementsKind();
- inline bool FitsRepresentation(Representation representation) {
- if (FLAG_track_fields && representation.IsNone()) {
- return false;
- } else if (FLAG_track_fields && representation.IsSmi()) {
- return IsSmi();
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
- return IsMutableHeapNumber() || IsNumber();
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- return IsHeapObject();
- }
- return true;
- }
+ inline bool FitsRepresentation(Representation representation);
// Checks whether two valid primitive encodings of a property name resolve to
// the same logical property. E.g., the smi 1, the string "1" and the double
@@ -1155,6 +1088,9 @@ class Object {
bool BooleanValue(); // ECMA-262 9.2.
+ // ES6 section 7.2.13 Strict Equality Comparison
+ bool StrictEquals(Object* that);
+
// Convert to a JSObject if needed.
// native_context is used when creating wrapper object.
static inline MaybeHandle<JSReceiver> ToObject(Isolate* isolate,
@@ -1163,10 +1099,6 @@ class Object {
Handle<Object> object,
Handle<Context> context);
- // Converts this to a Smi if possible.
- MUST_USE_RESULT static inline MaybeHandle<Smi> ToSmi(Isolate* isolate,
- Handle<Object> object);
-
MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
LookupIterator* it, LanguageMode language_mode = SLOPPY);
@@ -1229,9 +1161,15 @@ class Object {
Isolate* isolate, Handle<Object> object, uint32_t index,
LanguageMode language_mode = SLOPPY);
+ MUST_USE_RESULT static inline MaybeHandle<Object> SetElement(
+ Isolate* isolate, Handle<Object> object, uint32_t index,
+ Handle<Object> value, LanguageMode language_mode);
+
static inline Handle<Object> GetPrototypeSkipHiddenPrototypes(
Isolate* isolate, Handle<Object> receiver);
+ bool HasInPrototypeChain(Isolate* isolate, Object* object);
+
// Returns the permanent hash code associated with this object. May return
// undefined if not yet created.
Object* GetHash();
@@ -1318,6 +1256,11 @@ class Object {
};
+// In objects.h to be usable without objects-inl.h inclusion.
+bool Object::IsSmi() const { return HAS_SMI_TAG(this); }
+bool Object::IsHeapObject() const { return Internals::HasHeapObjectTag(this); }
+
+
struct Brief {
explicit Brief(const Object* const v) : value(v) {}
const Object* value;
@@ -1336,15 +1279,26 @@ std::ostream& operator<<(std::ostream& os, const Brief& v);
class Smi: public Object {
public:
// Returns the integer value.
- inline int value() const;
+ inline int value() const { return Internals::SmiValue(this); }
// Convert a value to a Smi object.
- static inline Smi* FromInt(int value);
+ static inline Smi* FromInt(int value) {
+ DCHECK(Smi::IsValid(value));
+ return reinterpret_cast<Smi*>(Internals::IntToSmi(value));
+ }
- static inline Smi* FromIntptr(intptr_t value);
+ static inline Smi* FromIntptr(intptr_t value) {
+ DCHECK(Smi::IsValid(value));
+ int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
+ return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
+ }
// Returns whether value can be represented in a Smi.
- static inline bool IsValid(intptr_t value);
+ static inline bool IsValid(intptr_t value) {
+ bool result = Internals::IsValidSmi(value);
+ DCHECK_EQ(result, value >= kMinValue && value <= kMaxValue);
+ return result;
+ }
DECLARE_CAST(Smi)
@@ -1449,10 +1403,15 @@ class HeapObject: public Object {
inline Isolate* GetIsolate() const;
// Converts an address to a HeapObject pointer.
- static inline HeapObject* FromAddress(Address address);
+ static inline HeapObject* FromAddress(Address address) {
+ DCHECK_TAG_ALIGNED(address);
+ return reinterpret_cast<HeapObject*>(address + kHeapObjectTag);
+ }
// Returns the address of this HeapObject.
- inline Address address();
+ inline Address address() {
+ return reinterpret_cast<Address>(this) - kHeapObjectTag;
+ }
// Iterates over pointers contained in the object (including the Map)
void Iterate(ObjectVisitor* v);
@@ -1629,26 +1588,60 @@ class HeapNumber: public HeapObject {
};
-// The Float32x4 class describes heap allocated SIMD values holding 4 32-bit
-// IEEE floats.
-class Float32x4 : public HeapObject {
+// The Simd128Value class describes heap allocated 128 bit SIMD values.
+class Simd128Value : public HeapObject {
public:
- inline float get_lane(int lane) const;
- inline void set_lane(int lane, float value);
+ DECLARE_CAST(Simd128Value)
- DECLARE_CAST(Float32x4)
+ DECLARE_PRINTER(Simd128Value)
+ DECLARE_VERIFIER(Simd128Value)
- // Dispatched behavior.
- void Float32x4Print(std::ostream& os); // NOLINT
- DECLARE_VERIFIER(Float32x4)
+ // Equality operations.
+ inline bool Equals(Simd128Value* that);
+
+ // Checks that another instance is bit-wise equal.
+ bool BitwiseEquals(const Simd128Value* other) const;
+ // Computes a hash from the 128 bit value, viewed as 4 32-bit integers.
+ uint32_t Hash() const;
+ // Copies the 16 bytes of SIMD data to the destination address.
+ void CopyBits(void* destination) const;
// Layout description.
static const int kValueOffset = HeapObject::kHeaderSize;
static const int kSize = kValueOffset + kSimd128Size;
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Float32x4);
-};
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Simd128Value);
+};
+
+
+// V has parameters (TYPE, Type, type, lane count, lane type)
+#define SIMD128_TYPES(V) \
+ V(FLOAT32X4, Float32x4, float32x4, 4, float) \
+ V(INT32X4, Int32x4, int32x4, 4, int32_t) \
+ V(BOOL32X4, Bool32x4, bool32x4, 4, bool) \
+ V(INT16X8, Int16x8, int16x8, 8, int16_t) \
+ V(BOOL16X8, Bool16x8, bool16x8, 8, bool) \
+ V(INT8X16, Int8x16, int8x16, 16, int8_t) \
+ V(BOOL8X16, Bool8x16, bool8x16, 16, bool)
+
+#define SIMD128_VALUE_CLASS(TYPE, Type, type, lane_count, lane_type) \
+ class Type final : public Simd128Value { \
+ public: \
+ inline lane_type get_lane(int lane) const; \
+ inline void set_lane(int lane, lane_type value); \
+ \
+ DECLARE_CAST(Type) \
+ \
+ DECLARE_PRINTER(Type) \
+ \
+ inline bool Equals(Type* that); \
+ \
+ private: \
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Type); \
+ };
+SIMD128_TYPES(SIMD128_VALUE_CLASS)
+#undef SIMD128_VALUE_CLASS
enum EnsureElementsMode {
@@ -1671,10 +1664,6 @@ class JSReceiver: public HeapObject {
public:
DECLARE_CAST(JSReceiver)
- MUST_USE_RESULT static MaybeHandle<Object> SetElement(
- Handle<JSReceiver> object, uint32_t index, Handle<Object> value,
- LanguageMode language_mode);
-
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
MUST_USE_RESULT static inline Maybe<bool> HasProperty(
Handle<JSReceiver> object, Handle<Name> name);
@@ -1781,9 +1770,8 @@ class JSObject: public JSReceiver {
// writing to any element the array must be copied. Use
// EnsureWritableFastElements in this case.
//
- // In the slow mode the elements is either a NumberDictionary, an
- // ExternalArray, or a FixedArray parameter map for a (sloppy)
- // arguments object.
+  // In the slow mode the elements is either a NumberDictionary or a
+  // FixedArray parameter map for a (sloppy) arguments object.
DECL_ACCESSORS(elements, FixedArrayBase)
inline void initialize_elements();
static void ResetElements(Handle<JSObject> object);
@@ -1810,17 +1798,6 @@ class JSObject: public JSReceiver {
inline bool HasSloppyArgumentsElements();
inline bool HasDictionaryElements();
- inline bool HasExternalUint8ClampedElements();
- inline bool HasExternalArrayElements();
- inline bool HasExternalInt8Elements();
- inline bool HasExternalUint8Elements();
- inline bool HasExternalInt16Elements();
- inline bool HasExternalUint16Elements();
- inline bool HasExternalInt32Elements();
- inline bool HasExternalUint32Elements();
- inline bool HasExternalFloat32Elements();
- inline bool HasExternalFloat64Elements();
-
inline bool HasFixedTypedArrayElements();
inline bool HasFixedUint8ClampedElements();
@@ -2021,7 +1998,6 @@ class JSObject: public JSReceiver {
// Would we convert a fast elements array to dictionary mode given
// an access at key?
bool WouldConvertToSlowElements(uint32_t index);
- inline bool WouldConvertToSlowElements(Handle<Object> key);
// Computes the new capacity when expanding the elements of a JSObject.
static uint32_t NewElementsCapacity(uint32_t old_capacity) {
@@ -2069,9 +2045,9 @@ class JSObject: public JSReceiver {
// with the specified attributes (ignoring interceptors).
int NumberOfOwnProperties(PropertyAttributes filter = NONE);
// Fill in details for properties into storage starting at the specified
- // index.
- void GetOwnPropertyNames(
- FixedArray* storage, int index, PropertyAttributes filter = NONE);
+ // index. Returns the number of properties added.
+ int GetOwnPropertyNames(FixedArray* storage, int index,
+ PropertyAttributes filter = NONE);
// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
@@ -2118,6 +2094,8 @@ class JSObject: public JSReceiver {
static Handle<SeededNumberDictionary> NormalizeElements(
Handle<JSObject> object);
+ void RequireSlowElements(SeededNumberDictionary* dictionary);
+
// Transform slow named properties to fast variants.
static void MigrateSlowToFast(Handle<JSObject> object,
int unused_property_fields, const char* reason);
@@ -2290,13 +2268,16 @@ class JSObject: public JSReceiver {
Handle<JSObject> object, const char* type, Handle<Name> name,
Handle<Object> old_value);
- // Gets the current elements capacity and the number of used elements.
- void GetElementsCapacityAndUsage(int* capacity, int* used);
+ // Gets the number of currently used elements.
+ int GetFastElementsUsage();
// Deletes an existing named property in a normalized object.
static void DeleteNormalizedProperty(Handle<JSObject> object,
Handle<Name> name, int entry);
+ static bool AllCanRead(LookupIterator* it);
+ static bool AllCanWrite(LookupIterator* it);
+
private:
friend class JSReceiver;
friend class Object;
@@ -2326,21 +2307,6 @@ class JSObject: public JSReceiver {
ElementsKind kind,
Object* object);
- static bool CanSetCallback(Handle<JSObject> object, Handle<Name> name);
- static void SetElementCallback(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> structure,
- PropertyAttributes attributes);
- static void SetPropertyCallback(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> structure,
- PropertyAttributes attributes);
- static void DefineElementAccessor(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> getter,
- Handle<Object> setter,
- PropertyAttributes attributes);
-
// Return the hash table backing store or the inline stored identity hash,
// whatever is found.
MUST_USE_RESULT Object* GetHiddenPropertiesHashTable();
@@ -2361,7 +2327,7 @@ class JSObject: public JSReceiver {
static Handle<Smi> GetOrCreateIdentityHash(Handle<JSObject> object);
static Handle<SeededNumberDictionary> GetNormalizedElementDictionary(
- Handle<JSObject> object);
+ Handle<JSObject> object, Handle<FixedArrayBase> elements);
// Helper for fast versions of preventExtensions, seal, and freeze.
// attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
@@ -2430,11 +2396,6 @@ class FixedArray: public FixedArrayBase {
// Shrink length and insert filler objects.
void Shrink(int length);
- // Copy operation.
- static Handle<FixedArray> CopySize(Handle<FixedArray> array,
- int new_length,
- PretenureFlag pretenure = NOT_TENURED);
-
enum KeyFilter { ALL_KEYS, NON_SYMBOL_KEYS };
// Add the elements of a JSArray to this FixedArray.
@@ -2458,9 +2419,7 @@ class FixedArray: public FixedArrayBase {
static int OffsetOfElementAt(int index) { return SizeFor(index); }
// Garbage collection support.
- Object** RawFieldOfElementAt(int index) {
- return HeapObject::RawField(this, OffsetOfElementAt(index));
- }
+ inline Object** RawFieldOfElementAt(int index);
DECLARE_CAST(FixedArray)
@@ -2491,10 +2450,7 @@ class FixedArray: public FixedArrayBase {
class BodyDescriptor : public FlexibleBodyDescriptor<kHeaderSize> {
public:
- static inline int SizeOf(Map* map, HeapObject* object) {
- return SizeFor(
- reinterpret_cast<FixedArray*>(object)->synchronized_length());
- }
+ static inline int SizeOf(Map* map, HeapObject* object);
};
protected:
@@ -2659,57 +2615,26 @@ class DescriptorArray: public FixedArray {
inline bool IsEmpty();
// Returns the number of descriptors in the array.
- int number_of_descriptors() {
- DCHECK(length() >= kFirstIndex || IsEmpty());
- int len = length();
- return len == 0 ? 0 : Smi::cast(get(kDescriptorLengthIndex))->value();
- }
+ inline int number_of_descriptors();
- int number_of_descriptors_storage() {
- int len = length();
- return len == 0 ? 0 : (len - kFirstIndex) / kDescriptorSize;
- }
+ inline int number_of_descriptors_storage();
- int NumberOfSlackDescriptors() {
- return number_of_descriptors_storage() - number_of_descriptors();
- }
+ inline int NumberOfSlackDescriptors();
inline void SetNumberOfDescriptors(int number_of_descriptors);
- inline int number_of_entries() { return number_of_descriptors(); }
+ inline int number_of_entries();
- bool HasEnumCache() {
- return !IsEmpty() && !get(kEnumCacheIndex)->IsSmi();
- }
+ inline bool HasEnumCache();
- void CopyEnumCacheFrom(DescriptorArray* array) {
- set(kEnumCacheIndex, array->get(kEnumCacheIndex));
- }
+ inline void CopyEnumCacheFrom(DescriptorArray* array);
- FixedArray* GetEnumCache() {
- DCHECK(HasEnumCache());
- FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
- return FixedArray::cast(bridge->get(kEnumCacheBridgeCacheIndex));
- }
+ inline FixedArray* GetEnumCache();
- bool HasEnumIndicesCache() {
- if (IsEmpty()) return false;
- Object* object = get(kEnumCacheIndex);
- if (object->IsSmi()) return false;
- FixedArray* bridge = FixedArray::cast(object);
- return !bridge->get(kEnumCacheBridgeIndicesCacheIndex)->IsSmi();
- }
+ inline bool HasEnumIndicesCache();
- FixedArray* GetEnumIndicesCache() {
- DCHECK(HasEnumIndicesCache());
- FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
- return FixedArray::cast(bridge->get(kEnumCacheBridgeIndicesCacheIndex));
- }
+ inline FixedArray* GetEnumIndicesCache();
- Object** GetEnumCacheSlot() {
- DCHECK(HasEnumCache());
- return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
- kEnumCacheOffset);
- }
+ inline Object** GetEnumCacheSlot();
void ClearEnumCache();
@@ -2857,8 +2782,8 @@ class DescriptorArray: public FixedArray {
inline explicit Entry(DescriptorArray* descs, int index) :
descs_(descs), index_(index) { }
- inline PropertyType type() { return descs_->GetType(index_); }
- inline Object* GetCallbackObject() { return descs_->GetValue(index_); }
+ inline PropertyType type();
+ inline Object* GetCallbackObject();
private:
DescriptorArray* descs_;
@@ -2959,47 +2884,30 @@ class BaseShape {
class HashTableBase : public FixedArray {
public:
// Returns the number of elements in the hash table.
- int NumberOfElements() {
- return Smi::cast(get(kNumberOfElementsIndex))->value();
- }
+ inline int NumberOfElements();
// Returns the number of deleted elements in the hash table.
- int NumberOfDeletedElements() {
- return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
- }
+ inline int NumberOfDeletedElements();
// Returns the capacity of the hash table.
- int Capacity() {
- return Smi::cast(get(kCapacityIndex))->value();
- }
+ inline int Capacity();
// ElementAdded should be called whenever an element is added to a
// hash table.
- void ElementAdded() { SetNumberOfElements(NumberOfElements() + 1); }
+ inline void ElementAdded();
// ElementRemoved should be called whenever an element is removed from
// a hash table.
- void ElementRemoved() {
- SetNumberOfElements(NumberOfElements() - 1);
- SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
- }
- void ElementsRemoved(int n) {
- SetNumberOfElements(NumberOfElements() - n);
- SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
- }
+ inline void ElementRemoved();
+ inline void ElementsRemoved(int n);
// Computes the required capacity for a table holding the given
// number of elements. May be more than HashTable::kMaxCapacity.
static inline int ComputeCapacity(int at_least_space_for);
- // Use a different heuristic to compute capacity when serializing.
- static inline int ComputeCapacityForSerialization(int at_least_space_for);
-
// Tells whether k is a real key. The hole and undefined are not allowed
// as keys and can be used to indicate missing or deleted elements.
- bool IsKey(Object* k) {
- return !k->IsTheHole() && !k->IsUndefined();
- }
+ inline bool IsKey(Object* k);
// Compute the probe offset (quadratic probing).
INLINE(static uint32_t GetProbeOffset(uint32_t n)) {
@@ -3016,14 +2924,10 @@ class HashTableBase : public FixedArray {
protected:
// Update the number of elements in the hash table.
- void SetNumberOfElements(int nof) {
- set(kNumberOfElementsIndex, Smi::FromInt(nof));
- }
+ inline void SetNumberOfElements(int nof);
// Update the number of deleted elements in the hash table.
- void SetNumberOfDeletedElements(int nod) {
- set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
- }
+ inline void SetNumberOfDeletedElements(int nod);
// Returns probe entry.
static uint32_t GetProbe(uint32_t hash, uint32_t number, uint32_t size) {
@@ -3283,13 +3187,10 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
enum SortMode { UNSORTED, SORTED };
- // Copies keys to preallocated fixed array.
- void CopyKeysTo(FixedArray* storage, PropertyAttributes filter,
- SortMode sort_mode);
-
// Fill in details for properties into storage.
- void CopyKeysTo(FixedArray* storage, int index, PropertyAttributes filter,
- SortMode sort_mode);
+ // Returns the number of properties added.
+ int CopyKeysTo(FixedArray* storage, int index, PropertyAttributes filter,
+ SortMode sort_mode);
// Copies enumerable keys to preallocated fixed array.
void CopyEnumKeysTo(FixedArray* storage);
@@ -3494,24 +3395,19 @@ class SeededNumberDictionary
// Type specific at put (default NONE attributes is used when adding).
MUST_USE_RESULT static Handle<SeededNumberDictionary> AtNumberPut(
- Handle<SeededNumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value);
+ Handle<SeededNumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value, bool used_as_prototype);
MUST_USE_RESULT static Handle<SeededNumberDictionary> AddNumberEntry(
- Handle<SeededNumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value,
- PropertyDetails details);
+ Handle<SeededNumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value, PropertyDetails details, bool used_as_prototype);
// Set an existing entry or add a new one if needed.
// Return the updated dictionary.
MUST_USE_RESULT static Handle<SeededNumberDictionary> Set(
- Handle<SeededNumberDictionary> dictionary,
- uint32_t key,
- Handle<Object> value,
- PropertyDetails details);
+ Handle<SeededNumberDictionary> dictionary, uint32_t key,
+ Handle<Object> value, PropertyDetails details, bool used_as_prototype);
- void UpdateMaxNumberKey(uint32_t key);
+ void UpdateMaxNumberKey(uint32_t key, bool used_as_prototype);
// If slow elements are required we will never go back to fast-case
// for the elements kept in this dictionary. We require slow
@@ -3793,9 +3689,7 @@ class OrderedHashMap
public:
DECLARE_CAST(OrderedHashMap)
- Object* ValueAt(int entry) {
- return get(EntryToIndex(entry) + kValueOffset);
- }
+ inline Object* ValueAt(int entry);
static const int kValueOffset = 1;
};
@@ -3868,41 +3762,6 @@ class WeakValueHashTable : public ObjectHashTable {
};
-// JSFunctionResultCache caches results of some JSFunction invocation.
-// It is a fixed array with fixed structure:
-// [0]: factory function
-// [1]: finger index
-// [2]: current cache size
-// [3]: dummy field.
-// The rest of array are key/value pairs.
-class JSFunctionResultCache : public FixedArray {
- public:
- static const int kFactoryIndex = 0;
- static const int kFingerIndex = kFactoryIndex + 1;
- static const int kCacheSizeIndex = kFingerIndex + 1;
- static const int kDummyIndex = kCacheSizeIndex + 1;
- static const int kEntriesIndex = kDummyIndex + 1;
-
- static const int kEntrySize = 2; // key + value
-
- static const int kFactoryOffset = kHeaderSize;
- static const int kFingerOffset = kFactoryOffset + kPointerSize;
- static const int kCacheSizeOffset = kFingerOffset + kPointerSize;
-
- inline void MakeZeroSize();
- inline void Clear();
-
- inline int size();
- inline void set_size(int size);
- inline int finger_index();
- inline void set_finger_index(int finger_index);
-
- DECLARE_CAST(JSFunctionResultCache)
-
- DECLARE_VERIFIER(JSFunctionResultCache)
-};
-
-
// ScopeInfo represents information about different scopes of a source
// program and the allocation of the scope's variables. Scope information
// is stored in a compressed form in ScopeInfo objects and is used
@@ -3962,14 +3821,12 @@ class ScopeInfo : public FixedArray {
bool HasContext();
// Return if this is a function scope with "use asm".
- bool IsAsmModule() { return AsmModuleField::decode(Flags()); }
+ inline bool IsAsmModule();
// Return if this is a nested function within an asm module scope.
- bool IsAsmFunction() { return AsmFunctionField::decode(Flags()); }
+ inline bool IsAsmFunction();
- bool IsSimpleParameterList() {
- return IsSimpleParameterListField::decode(Flags());
- }
+ inline bool HasSimpleParameters();
// Return the function_name if present.
String* FunctionName();
@@ -4022,6 +3879,9 @@ class ScopeInfo : public FixedArray {
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
+ // Lookup the name of a certain context slot by its index.
+ String* ContextSlotName(int slot_index);
+
// Lookup support for serialized scope info. Returns the
// parameter index for a given parameter name if the parameter is present;
// otherwise returns a value < 0. The name must be an internalized string.
@@ -4040,12 +3900,6 @@ class ScopeInfo : public FixedArray {
FunctionKind function_kind();
- // Copies all the context locals into an object used to materialize a scope.
- static void CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- Handle<JSObject> scope_object);
-
-
static Handle<ScopeInfo> Create(Isolate* isolate, Zone* zone, Scope* scope);
static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
@@ -4064,37 +3918,28 @@ class ScopeInfo : public FixedArray {
// 3. The number of non-parameter variables allocated on the stack.
// 4. The number of non-parameter and parameter variables allocated in the
// context.
-#define FOR_EACH_NUMERIC_FIELD(V) \
- V(Flags) \
- V(ParameterCount) \
- V(StackLocalCount) \
- V(ContextLocalCount) \
- V(ContextGlobalCount) \
+#define FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(V) \
+ V(Flags) \
+ V(ParameterCount) \
+ V(StackLocalCount) \
+ V(ContextLocalCount) \
+ V(ContextGlobalCount) \
V(StrongModeFreeVariableCount)
-#define FIELD_ACCESSORS(name) \
- void Set##name(int value) { \
- set(k##name, Smi::FromInt(value)); \
- } \
- int name() { \
- if (length() > 0) { \
- return Smi::cast(get(k##name))->value(); \
- } else { \
- return 0; \
- } \
- }
- FOR_EACH_NUMERIC_FIELD(FIELD_ACCESSORS)
+#define FIELD_ACCESSORS(name) \
+ inline void Set##name(int value); \
+ inline int name();
+ FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
#undef FIELD_ACCESSORS
+ private:
enum {
#define DECL_INDEX(name) k##name,
- FOR_EACH_NUMERIC_FIELD(DECL_INDEX)
+ FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(DECL_INDEX)
#undef DECL_INDEX
-#undef FOR_EACH_NUMERIC_FIELD
kVariablePartIndex
};
- private:
// The layout of the variable part of a ScopeInfo is as follows:
// 1. ParameterEntries:
// This part stores the names of the parameters for function scopes. One
@@ -4170,10 +4015,10 @@ class ScopeInfo : public FixedArray {
class AsmModuleField : public BitField<bool, FunctionVariableMode::kNext, 1> {
};
class AsmFunctionField : public BitField<bool, AsmModuleField::kNext, 1> {};
- class IsSimpleParameterListField
+ class HasSimpleParametersField
: public BitField<bool, AsmFunctionField::kNext, 1> {};
class FunctionKindField
- : public BitField<FunctionKind, IsSimpleParameterListField::kNext, 8> {};
+ : public BitField<FunctionKind, HasSimpleParametersField::kNext, 8> {};
// BitFields representing the encoded information for context locals in the
// ContextLocalInfoEntries part.
@@ -4181,6 +4026,8 @@ class ScopeInfo : public FixedArray {
class ContextLocalInitFlag: public BitField<InitializationFlag, 3, 1> {};
class ContextLocalMaybeAssignedFlag
: public BitField<MaybeAssignedFlag, 4, 1> {};
+
+ friend class ScopeIterator;
};
@@ -4217,7 +4064,7 @@ class NormalizedMapCache: public FixedArray {
// that is attached to code objects.
class ByteArray: public FixedArrayBase {
public:
- inline int Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
+ inline int Size();
// Setter and getter.
inline byte get(int index);
@@ -4248,9 +4095,7 @@ class ByteArray: public FixedArrayBase {
DECLARE_CAST(ByteArray)
// Dispatched behavior.
- inline int ByteArraySize() {
- return SizeFor(this->length());
- }
+ inline int ByteArraySize();
DECLARE_PRINTER(ByteArray)
DECLARE_VERIFIER(ByteArray)
@@ -4267,6 +4112,50 @@ class ByteArray: public FixedArrayBase {
};
+// BytecodeArray represents a sequence of interpreter bytecodes.
+class BytecodeArray : public FixedArrayBase {
+ public:
+ static int SizeFor(int length) {
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length);
+ }
+
+ // Setter and getter
+ inline byte get(int index);
+ inline void set(int index, byte value);
+
+ // Returns data start address.
+ inline Address GetFirstBytecodeAddress();
+
+ // Accessors for frame size and the number of locals
+ inline int frame_size() const;
+ inline void set_frame_size(int value);
+
+ DECLARE_CAST(BytecodeArray)
+
+ // Dispatched behavior.
+ inline int BytecodeArraySize();
+
+ DECLARE_PRINTER(BytecodeArray)
+ DECLARE_VERIFIER(BytecodeArray)
+
+ void Disassemble(std::ostream& os);
+
+ // Layout description.
+ static const int kFrameSizeOffset = FixedArrayBase::kHeaderSize;
+ static const int kHeaderSize = kFrameSizeOffset + kIntSize;
+
+ static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
+
+ // Maximal memory consumption for a single BytecodeArray.
+ static const int kMaxSize = 512 * MB;
+ // Maximal length of a single BytecodeArray.
+ static const int kMaxLength = kMaxSize - kHeaderSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArray);
+};
+
+
// FreeSpace are fixed-size free memory blocks used by the heap and GC.
// They look like heap objects (are heap object tagged and have a map) so that
// the heap remains iterable. They have a size and a next pointer.
@@ -4281,7 +4170,7 @@ class FreeSpace: public HeapObject {
inline int nobarrier_size() const;
inline void nobarrier_set_size(int value);
- inline int Size() { return size(); }
+ inline int Size();
// Accessors for the next field.
inline FreeSpace* next();
@@ -4317,259 +4206,16 @@ class FreeSpace: public HeapObject {
V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)
-
-// An ExternalArray represents a fixed-size array of primitive values
-// which live outside the JavaScript heap. Its subclasses are used to
-// implement the CanvasArray types being defined in the WebGL
-// specification. As of this writing the first public draft is not yet
-// available, but Khronos members can access the draft at:
-// https://cvs.khronos.org/svn/repos/3dweb/trunk/doc/spec/WebGL-spec.html
-//
-// The semantics of these arrays differ from CanvasPixelArray.
-// Out-of-range values passed to the setter are converted via a C
-// cast, not clamping. Out-of-range indices cause exceptions to be
-// raised rather than being silently ignored.
-class ExternalArray: public FixedArrayBase {
- public:
- inline bool is_the_hole(int index) { return false; }
-
- // [external_pointer]: The pointer to the external memory area backing this
- // external array.
- DECL_ACCESSORS(external_pointer, void) // Pointer to the data store.
-
- DECLARE_CAST(ExternalArray)
-
- // Maximal acceptable length for an external array.
- static const int kMaxLength = 0x3fffffff;
-
- // ExternalArray headers are not quadword aligned.
- static const int kExternalPointerOffset =
- POINTER_SIZE_ALIGN(FixedArrayBase::kLengthOffset + kPointerSize);
- static const int kSize = kExternalPointerOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalArray);
-};
-
-
-// A ExternalUint8ClampedArray represents a fixed-size byte array with special
-// semantics used for implementing the CanvasPixelArray object. Please see the
-// specification at:
-
-// http://www.whatwg.org/specs/web-apps/current-work/
-// multipage/the-canvas-element.html#canvaspixelarray
-// In particular, write access clamps the value written to 0 or 255 if the
-// value written is outside this range.
-class ExternalUint8ClampedArray: public ExternalArray {
- public:
- inline uint8_t* external_uint8_clamped_pointer();
-
- // Setter and getter.
- inline uint8_t get_scalar(int index);
- static inline Handle<Object> get(Handle<ExternalUint8ClampedArray> array,
- int index);
- inline void set(int index, uint8_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined and clamps the converted value between 0 and 255.
- void SetValue(uint32_t index, Object* value);
-
- DECLARE_CAST(ExternalUint8ClampedArray)
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalUint8ClampedArray)
- DECLARE_VERIFIER(ExternalUint8ClampedArray)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint8ClampedArray);
-};
-
-
-class ExternalInt8Array: public ExternalArray {
- public:
- // Setter and getter.
- inline int8_t get_scalar(int index);
- static inline Handle<Object> get(Handle<ExternalInt8Array> array, int index);
- inline void set(int index, int8_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- void SetValue(uint32_t index, Object* value);
-
- DECLARE_CAST(ExternalInt8Array)
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalInt8Array)
- DECLARE_VERIFIER(ExternalInt8Array)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt8Array);
-};
-
-
-class ExternalUint8Array: public ExternalArray {
- public:
- // Setter and getter.
- inline uint8_t get_scalar(int index);
- static inline Handle<Object> get(Handle<ExternalUint8Array> array, int index);
- inline void set(int index, uint8_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- void SetValue(uint32_t index, Object* value);
-
- DECLARE_CAST(ExternalUint8Array)
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalUint8Array)
- DECLARE_VERIFIER(ExternalUint8Array)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint8Array);
-};
-
-
-class ExternalInt16Array: public ExternalArray {
- public:
- // Setter and getter.
- inline int16_t get_scalar(int index);
- static inline Handle<Object> get(Handle<ExternalInt16Array> array, int index);
- inline void set(int index, int16_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- void SetValue(uint32_t index, Object* value);
-
- DECLARE_CAST(ExternalInt16Array)
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalInt16Array)
- DECLARE_VERIFIER(ExternalInt16Array)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt16Array);
-};
-
-
-class ExternalUint16Array: public ExternalArray {
- public:
- // Setter and getter.
- inline uint16_t get_scalar(int index);
- static inline Handle<Object> get(Handle<ExternalUint16Array> array,
- int index);
- inline void set(int index, uint16_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- void SetValue(uint32_t index, Object* value);
-
- DECLARE_CAST(ExternalUint16Array)
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalUint16Array)
- DECLARE_VERIFIER(ExternalUint16Array)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint16Array);
-};
-
-
-class ExternalInt32Array: public ExternalArray {
- public:
- // Setter and getter.
- inline int32_t get_scalar(int index);
- static inline Handle<Object> get(Handle<ExternalInt32Array> array, int index);
- inline void set(int index, int32_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- void SetValue(uint32_t index, Object* value);
-
- DECLARE_CAST(ExternalInt32Array)
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalInt32Array)
- DECLARE_VERIFIER(ExternalInt32Array)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt32Array);
-};
-
-
-class ExternalUint32Array: public ExternalArray {
- public:
- // Setter and getter.
- inline uint32_t get_scalar(int index);
- static inline Handle<Object> get(Handle<ExternalUint32Array> array,
- int index);
- inline void set(int index, uint32_t value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- void SetValue(uint32_t index, Object* value);
-
- DECLARE_CAST(ExternalUint32Array)
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalUint32Array)
- DECLARE_VERIFIER(ExternalUint32Array)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUint32Array);
-};
-
-
-class ExternalFloat32Array: public ExternalArray {
- public:
- // Setter and getter.
- inline float get_scalar(int index);
- static inline Handle<Object> get(Handle<ExternalFloat32Array> array,
- int index);
- inline void set(int index, float value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- void SetValue(uint32_t index, Object* value);
-
- DECLARE_CAST(ExternalFloat32Array)
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalFloat32Array)
- DECLARE_VERIFIER(ExternalFloat32Array)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloat32Array);
-};
-
-
-class ExternalFloat64Array: public ExternalArray {
- public:
- // Setter and getter.
- inline double get_scalar(int index);
- static inline Handle<Object> get(Handle<ExternalFloat64Array> array,
- int index);
- inline void set(int index, double value);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- void SetValue(uint32_t index, Object* value);
-
- DECLARE_CAST(ExternalFloat64Array)
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExternalFloat64Array)
- DECLARE_VERIFIER(ExternalFloat64Array)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloat64Array);
-};
-
-
class FixedTypedArrayBase: public FixedArrayBase {
public:
- // [base_pointer]: For now, points to the FixedTypedArrayBase itself.
+ // [base_pointer]: Either points to the FixedTypedArrayBase itself or nullptr.
DECL_ACCESSORS(base_pointer, Object)
+  // [external_pointer]: Contains the offset between base_pointer and the start
+  // of the data. If base_pointer is nullptr, the external_pointer therefore
+  // points directly to the actual backing store.
+ DECL_ACCESSORS(external_pointer, void)
+
// Dispatched behavior.
inline void FixedTypedArrayBaseIterateBody(ObjectVisitor* v);
@@ -4578,11 +4224,12 @@ class FixedTypedArrayBase: public FixedArrayBase {
DECLARE_CAST(FixedTypedArrayBase)
- static const int kBasePointerOffset =
- FixedArrayBase::kHeaderSize + kPointerSize;
- static const int kHeaderSize = kBasePointerOffset + kPointerSize;
+ static const int kBasePointerOffset = FixedArrayBase::kHeaderSize;
+ static const int kExternalPointerOffset = kBasePointerOffset + kPointerSize;
+ static const int kHeaderSize =
+ DOUBLE_POINTER_ALIGN(kExternalPointerOffset + kPointerSize);
- static const int kDataOffset = DOUBLE_POINTER_ALIGN(kHeaderSize);
+ static const int kDataOffset = kHeaderSize;
inline int size();
@@ -4675,52 +4322,38 @@ class DeoptimizationInputData: public FixedArray {
static const int kDeoptEntrySize = 4;
// Simple element accessors.
-#define DEFINE_ELEMENT_ACCESSORS(name, type) \
- type* name() { \
- return type::cast(get(k##name##Index)); \
- } \
- void Set##name(type* value) { \
- set(k##name##Index, value); \
- }
+#define DECLARE_ELEMENT_ACCESSORS(name, type) \
+ inline type* name(); \
+ inline void Set##name(type* value);
- DEFINE_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
- DEFINE_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
- DEFINE_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
- DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
- DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
- DEFINE_ELEMENT_ACCESSORS(OptimizationId, Smi)
- DEFINE_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
- DEFINE_ELEMENT_ACCESSORS(WeakCellCache, Object)
+ DECLARE_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
+ DECLARE_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
+ DECLARE_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
+ DECLARE_ELEMENT_ACCESSORS(OsrAstId, Smi)
+ DECLARE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
+ DECLARE_ELEMENT_ACCESSORS(OptimizationId, Smi)
+ DECLARE_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
+ DECLARE_ELEMENT_ACCESSORS(WeakCellCache, Object)
-#undef DEFINE_ELEMENT_ACCESSORS
+#undef DECLARE_ELEMENT_ACCESSORS
// Accessors for elements of the ith deoptimization entry.
-#define DEFINE_ENTRY_ACCESSORS(name, type) \
- type* name(int i) { \
- return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
- } \
- void Set##name(int i, type* value) { \
- set(IndexForEntry(i) + k##name##Offset, value); \
- }
+#define DECLARE_ENTRY_ACCESSORS(name, type) \
+ inline type* name(int i); \
+ inline void Set##name(int i, type* value);
- DEFINE_ENTRY_ACCESSORS(AstIdRaw, Smi)
- DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi)
- DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
- DEFINE_ENTRY_ACCESSORS(Pc, Smi)
+ DECLARE_ENTRY_ACCESSORS(AstIdRaw, Smi)
+ DECLARE_ENTRY_ACCESSORS(TranslationIndex, Smi)
+ DECLARE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
+ DECLARE_ENTRY_ACCESSORS(Pc, Smi)
-#undef DEFINE_DEOPT_ENTRY_ACCESSORS
+#undef DECLARE_ENTRY_ACCESSORS
- BailoutId AstId(int i) {
- return BailoutId(AstIdRaw(i)->value());
- }
+ inline BailoutId AstId(int i);
- void SetAstId(int i, BailoutId value) {
- SetAstIdRaw(i, Smi::FromInt(value.ToInt()));
- }
+ inline void SetAstId(int i, BailoutId value);
- int DeoptCount() {
- return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
- }
+ inline int DeoptCount();
// Allocates a DeoptimizationInputData.
static Handle<DeoptimizationInputData> New(Isolate* isolate,
@@ -4750,18 +4383,14 @@ class DeoptimizationInputData: public FixedArray {
// [i * 2 + 1]: PC and state of ith deoptimization
class DeoptimizationOutputData: public FixedArray {
public:
- int DeoptPoints() { return length() / 2; }
+ inline int DeoptPoints();
- BailoutId AstId(int index) {
- return BailoutId(Smi::cast(get(index * 2))->value());
- }
+ inline BailoutId AstId(int index);
- void SetAstId(int index, BailoutId id) {
- set(index * 2, Smi::FromInt(id.ToInt()));
- }
+ inline void SetAstId(int index, BailoutId id);
- Smi* PcAndState(int index) { return Smi::cast(get(1 + index * 2)); }
- void SetPcAndState(int index, Smi* offset) { set(1 + index * 2, offset); }
+ inline Smi* PcAndState(int index);
+ inline void SetPcAndState(int index, Smi* offset);
static int LengthOfFixedArray(int deopt_points) {
return deopt_points * 2;
@@ -4797,30 +4426,14 @@ class HandlerTable : public FixedArray {
enum CatchPrediction { UNCAUGHT, CAUGHT };
// Accessors for handler table based on ranges.
- void SetRangeStart(int index, int value) {
- set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
- }
- void SetRangeEnd(int index, int value) {
- set(index * kRangeEntrySize + kRangeEndIndex, Smi::FromInt(value));
- }
- void SetRangeHandler(int index, int offset, CatchPrediction prediction) {
- int value = HandlerOffsetField::encode(offset) |
- HandlerPredictionField::encode(prediction);
- set(index * kRangeEntrySize + kRangeHandlerIndex, Smi::FromInt(value));
- }
- void SetRangeDepth(int index, int value) {
- set(index * kRangeEntrySize + kRangeDepthIndex, Smi::FromInt(value));
- }
+ inline void SetRangeStart(int index, int value);
+ inline void SetRangeEnd(int index, int value);
+ inline void SetRangeHandler(int index, int offset, CatchPrediction pred);
+ inline void SetRangeDepth(int index, int value);
// Accessors for handler table based on return addresses.
- void SetReturnOffset(int index, int value) {
- set(index * kReturnEntrySize + kReturnOffsetIndex, Smi::FromInt(value));
- }
- void SetReturnHandler(int index, int offset, CatchPrediction prediction) {
- int value = HandlerOffsetField::encode(offset) |
- HandlerPredictionField::encode(prediction);
- set(index * kReturnEntrySize + kReturnHandlerIndex, Smi::FromInt(value));
- }
+ inline void SetReturnOffset(int index, int value);
+ inline void SetReturnHandler(int index, int offset, CatchPrediction pred);
// Lookup handler in a table based on ranges.
int LookupRange(int pc_offset, int* stack_depth, CatchPrediction* prediction);
@@ -4987,26 +4600,22 @@ class Code: public HeapObject {
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
inline bool is_debug_stub();
- inline bool is_handler() { return kind() == HANDLER; }
- inline bool is_load_stub() { return kind() == LOAD_IC; }
- inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
- inline bool is_store_stub() { return kind() == STORE_IC; }
- inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
- inline bool is_call_stub() { return kind() == CALL_IC; }
- inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
- inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
- inline bool is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; }
- inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
+ inline bool is_handler();
+ inline bool is_load_stub();
+ inline bool is_keyed_load_stub();
+ inline bool is_store_stub();
+ inline bool is_keyed_store_stub();
+ inline bool is_call_stub();
+ inline bool is_binary_op_stub();
+ inline bool is_compare_ic_stub();
+ inline bool is_compare_nil_ic_stub();
+ inline bool is_to_boolean_ic_stub();
inline bool is_keyed_stub();
- inline bool is_optimized_code() { return kind() == OPTIMIZED_FUNCTION; }
- inline bool embeds_maps_weakly() {
- Kind k = kind();
- return (k == LOAD_IC || k == STORE_IC || k == KEYED_LOAD_IC ||
- k == KEYED_STORE_IC || k == COMPARE_NIL_IC) &&
- ic_state() == MONOMORPHIC;
- }
+ inline bool is_optimized_code();
+ inline bool embeds_maps_weakly();
inline bool IsCodeStubOrIC();
+ inline bool IsJavaScriptCode();
inline void set_raw_kind_specific_flags1(int value);
inline void set_raw_kind_specific_flags2(int value);
@@ -5038,11 +4647,6 @@ class Code: public HeapObject {
inline bool has_debug_break_slots();
inline void set_has_debug_break_slots(bool value);
- // [compiled_with_optimizing]: For FUNCTION kind, tells if it has
- // been compiled with IsOptimizing set to true.
- inline bool is_compiled_optimizable();
- inline void set_compiled_optimizable(bool value);
-
// [has_reloc_info_for_serialization]: For FUNCTION kind, tells if its
// reloc info includes runtime and external references to support
// serialization/deserialization.
@@ -5209,12 +4813,7 @@ class Code: public HeapObject {
// Calculate the size of the code object to report for log events. This takes
// the layout of the code object into account.
- int ExecutableSize() {
- // Check that the assumptions about the layout of the code object holds.
- DCHECK_EQ(static_cast<int>(instruction_start() - address()),
- Code::kHeaderSize);
- return instruction_size() + Code::kHeaderSize;
- }
+ inline int ExecutableSize();
// Locating source position.
int SourcePosition(Address pc);
@@ -5223,7 +4822,7 @@ class Code: public HeapObject {
DECLARE_CAST(Code)
// Dispatched behavior.
- int CodeSize() { return SizeFor(body_size()); }
+ inline int CodeSize();
inline void CodeIterateBody(ObjectVisitor* v);
template<typename StaticVisitor>
@@ -5280,18 +4879,12 @@ class Code: public HeapObject {
#ifdef DEBUG
enum VerifyMode { kNoContextSpecificPointers, kNoContextRetainingPointers };
void VerifyEmbeddedObjects(VerifyMode mode = kNoContextRetainingPointers);
+ static void VerifyRecompiledCode(Code* old_code, Code* new_code);
#endif // DEBUG
- inline bool CanContainWeakObjects() {
- // is_turbofanned() implies !can_have_weak_objects().
- DCHECK(!is_optimized_code() || !is_turbofanned() ||
- !can_have_weak_objects());
- return is_optimized_code() && can_have_weak_objects();
- }
+ inline bool CanContainWeakObjects();
- inline bool IsWeakObject(Object* object) {
- return (CanContainWeakObjects() && IsWeakObjectInOptimizedCode(object));
- }
+ inline bool IsWeakObject(Object* object);
static inline bool IsWeakObjectInOptimizedCode(Object* object);
@@ -5337,11 +4930,10 @@ class Code: public HeapObject {
class FullCodeFlagsHasDeoptimizationSupportField:
public BitField<bool, 0, 1> {}; // NOLINT
class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
- class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
class FullCodeFlagsHasRelocInfoForSerialization
- : public BitField<bool, 3, 1> {};
-
- static const int kProfilerTicksOffset = kFullCodeFlags + 1;
+ : public BitField<bool, 2, 1> {};
+ // Bit 3 in this bitfield is unused.
+ class ProfilerTicksField : public BitField<int, 4, 28> {};
// Flags layout. BitField<type, shift, size>.
class ICStateField : public BitField<InlineCacheState, 0, 4> {};
@@ -5381,7 +4973,7 @@ class Code: public HeapObject {
// KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION)
static const int kSafepointTableOffsetFirstBit = kIsCrankshaftedBit + 1;
- static const int kSafepointTableOffsetBitCount = 24;
+ static const int kSafepointTableOffsetBitCount = 30;
STATIC_ASSERT(kSafepointTableOffsetFirstBit +
kSafepointTableOffsetBitCount <= 32);
@@ -5561,13 +5153,23 @@ class Map: public HeapObject {
inline int instance_size();
inline void set_instance_size(int value);
- // Count of properties allocated in the object.
- inline int inobject_properties();
- inline void set_inobject_properties(int value);
-
- // Count of property fields pre-allocated in the object when first allocated.
- inline int pre_allocated_property_fields();
- inline void set_pre_allocated_property_fields(int value);
+ // Only to clear an unused byte, remove once byte is used.
+ inline void clear_unused();
+
+ // [inobject_properties_or_constructor_function_index]: Provides access
+ // to the inobject properties in case of JSObject maps, or the constructor
+ // function index in case of primitive maps.
+ inline int inobject_properties_or_constructor_function_index();
+ inline void set_inobject_properties_or_constructor_function_index(int value);
+ // Count of properties allocated in the object (JSObject only).
+ inline int GetInObjectProperties();
+ inline void SetInObjectProperties(int value);
+ // Index of the constructor function in the native context (primitives only),
+ // or the special sentinel value to indicate that there is no object wrapper
+ // for the primitive (i.e. in case of null or undefined).
+ static const int kNoConstructorFunctionIndex = 0;
+ inline int GetConstructorFunctionIndex();
+ inline void SetConstructorFunctionIndex(int value);
// Instance type.
inline InstanceType instance_type();
@@ -5631,31 +5233,16 @@ class Map: public HeapObject {
// Tells whether the instance with this map should be ignored by the
// Object.getPrototypeOf() function and the __proto__ accessor.
- inline void set_is_hidden_prototype() {
- set_bit_field(bit_field() | (1 << kIsHiddenPrototype));
- }
-
- inline bool is_hidden_prototype() {
- return ((1 << kIsHiddenPrototype) & bit_field()) != 0;
- }
+ inline void set_is_hidden_prototype();
+ inline bool is_hidden_prototype();
// Records and queries whether the instance has a named interceptor.
- inline void set_has_named_interceptor() {
- set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
- }
-
- inline bool has_named_interceptor() {
- return ((1 << kHasNamedInterceptor) & bit_field()) != 0;
- }
+ inline void set_has_named_interceptor();
+ inline bool has_named_interceptor();
// Records and queries whether the instance has an indexed interceptor.
- inline void set_has_indexed_interceptor() {
- set_bit_field(bit_field() | (1 << kHasIndexedInterceptor));
- }
-
- inline bool has_indexed_interceptor() {
- return ((1 << kHasIndexedInterceptor) & bit_field()) != 0;
- }
+ inline void set_has_indexed_interceptor();
+ inline bool has_indexed_interceptor();
// Tells whether the instance is undetectable.
// An undetectable object is a special class of JSObject: 'typeof' operator
@@ -5663,22 +5250,12 @@ class Map: public HeapObject {
// a normal JS object. It is useful for implementing undetectable
// document.all in Firefox & Safari.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=248549.
- inline void set_is_undetectable() {
- set_bit_field(bit_field() | (1 << kIsUndetectable));
- }
-
- inline bool is_undetectable() {
- return ((1 << kIsUndetectable) & bit_field()) != 0;
- }
+ inline void set_is_undetectable();
+ inline bool is_undetectable();
// Tells whether the instance has a call-as-function handler.
- inline void set_is_observed() {
- set_bit_field(bit_field() | (1 << kIsObserved));
- }
-
- inline bool is_observed() {
- return ((1 << kIsObserved) & bit_field()) != 0;
- }
+ inline void set_is_observed();
+ inline bool is_observed();
inline void set_is_strong();
inline bool is_strong();
@@ -5687,54 +5264,20 @@ class Map: public HeapObject {
inline void set_is_prototype_map(bool value);
inline bool is_prototype_map() const;
- inline void set_elements_kind(ElementsKind elements_kind) {
- DCHECK(static_cast<int>(elements_kind) < kElementsKindCount);
- DCHECK(kElementsKindCount <= (1 << Map::ElementsKindBits::kSize));
- set_bit_field2(Map::ElementsKindBits::update(bit_field2(), elements_kind));
- DCHECK(this->elements_kind() == elements_kind);
- }
-
- inline ElementsKind elements_kind() {
- return Map::ElementsKindBits::decode(bit_field2());
- }
+ inline void set_elements_kind(ElementsKind elements_kind);
+ inline ElementsKind elements_kind();
// Tells whether the instance has fast elements that are only Smis.
- inline bool has_fast_smi_elements() {
- return IsFastSmiElementsKind(elements_kind());
- }
+ inline bool has_fast_smi_elements();
// Tells whether the instance has fast elements.
- inline bool has_fast_object_elements() {
- return IsFastObjectElementsKind(elements_kind());
- }
-
- inline bool has_fast_smi_or_object_elements() {
- return IsFastSmiOrObjectElementsKind(elements_kind());
- }
-
- inline bool has_fast_double_elements() {
- return IsFastDoubleElementsKind(elements_kind());
- }
-
- inline bool has_fast_elements() {
- return IsFastElementsKind(elements_kind());
- }
-
- inline bool has_sloppy_arguments_elements() {
- return IsSloppyArgumentsElements(elements_kind());
- }
-
- inline bool has_external_array_elements() {
- return IsExternalArrayElementsKind(elements_kind());
- }
-
- inline bool has_fixed_typed_array_elements() {
- return IsFixedTypedArrayElementsKind(elements_kind());
- }
-
- inline bool has_dictionary_elements() {
- return IsDictionaryElementsKind(elements_kind());
- }
+ inline bool has_fast_object_elements();
+ inline bool has_fast_smi_or_object_elements();
+ inline bool has_fast_double_elements();
+ inline bool has_fast_elements();
+ inline bool has_sloppy_arguments_elements();
+ inline bool has_fixed_typed_array_elements();
+ inline bool has_dictionary_elements();
static bool IsValidElementsTransition(ElementsKind from_kind,
ElementsKind to_kind);
@@ -5784,9 +5327,8 @@ class Map: public HeapObject {
// TODO(ishell): moveit!
static Handle<Map> GeneralizeAllFieldRepresentations(Handle<Map> map);
MUST_USE_RESULT static Handle<HeapType> GeneralizeFieldType(
- Handle<HeapType> type1,
- Handle<HeapType> type2,
- Isolate* isolate);
+ Representation rep1, Handle<HeapType> type1, Representation rep2,
+ Handle<HeapType> type2, Isolate* isolate);
static void GeneralizeFieldType(Handle<Map> map, int modify_index,
Representation new_representation,
Handle<HeapType> new_field_type);
@@ -5803,8 +5345,6 @@ class Map: public HeapObject {
static Handle<Map> PrepareForDataProperty(Handle<Map> old_map,
int descriptor_number,
Handle<Object> value);
- static Handle<Map> PrepareForDataElement(Handle<Map> old_map,
- Handle<Object> value);
static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode,
const char* reason);
@@ -5879,35 +5419,15 @@ class Map: public HeapObject {
inline PropertyDetails GetLastDescriptorDetails();
- int LastAdded() {
- int number_of_own_descriptors = NumberOfOwnDescriptors();
- DCHECK(number_of_own_descriptors > 0);
- return number_of_own_descriptors - 1;
- }
-
- int NumberOfOwnDescriptors() {
- return NumberOfOwnDescriptorsBits::decode(bit_field3());
- }
+ inline int LastAdded();
- void SetNumberOfOwnDescriptors(int number) {
- DCHECK(number <= instance_descriptors()->number_of_descriptors());
- set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
- }
+ inline int NumberOfOwnDescriptors();
+ inline void SetNumberOfOwnDescriptors(int number);
inline Cell* RetrieveDescriptorsPointer();
- int EnumLength() {
- return EnumLengthBits::decode(bit_field3());
- }
-
- void SetEnumLength(int length) {
- if (length != kInvalidEnumCacheSentinel) {
- DCHECK(length >= 0);
- DCHECK(length == 0 || instance_descriptors()->HasEnumCache());
- DCHECK(length <= NumberOfOwnDescriptors());
- }
- set_bit_field3(EnumLengthBits::update(bit_field3(), length));
- }
+ inline int EnumLength();
+ inline void SetEnumLength(int length);
inline bool owns_descriptors();
inline void set_owns_descriptors(bool owns_descriptors);
@@ -6012,13 +5532,6 @@ class Map: public HeapObject {
int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
PropertyAttributes filter = NONE);
- // Returns the number of slots allocated for the initial properties
- // backing storage for instances of this map.
- int InitialPropertiesLength() {
- return pre_allocated_property_fields() + unused_property_fields() -
- inobject_properties();
- }
-
DECLARE_CAST(Map)
// Code cache operations.
@@ -6036,8 +5549,7 @@ class Map: public HeapObject {
static void AppendCallbackDescriptors(Handle<Map> map,
Handle<Object> descriptors);
- static inline int SlackForArraySize(bool is_prototype_map, int old_size,
- int size_limit);
+ static inline int SlackForArraySize(int old_size, int size_limit);
static void EnsureDescriptorSlack(Handle<Map> map, int slack);
@@ -6066,30 +5578,16 @@ class Map: public HeapObject {
static Handle<Map> FindTransitionedMap(Handle<Map> map,
MapHandleList* candidates);
- bool CanTransition() {
- // Only JSObject and subtypes have map transitions and back pointers.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
- return instance_type() >= FIRST_JS_OBJECT_TYPE;
- }
+ inline bool CanTransition();
- bool IsJSObjectMap() {
- return instance_type() >= FIRST_JS_OBJECT_TYPE;
- }
- bool IsStringMap() { return instance_type() < FIRST_NONSTRING_TYPE; }
- bool IsJSProxyMap() {
- InstanceType type = instance_type();
- return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
- }
- bool IsJSGlobalProxyMap() {
- return instance_type() == JS_GLOBAL_PROXY_TYPE;
- }
- bool IsJSGlobalObjectMap() {
- return instance_type() == JS_GLOBAL_OBJECT_TYPE;
- }
- bool IsGlobalObjectMap() {
- const InstanceType type = instance_type();
- return type == JS_GLOBAL_OBJECT_TYPE || type == JS_BUILTINS_OBJECT_TYPE;
- }
+ inline bool IsPrimitiveMap();
+ inline bool IsJSObjectMap();
+ inline bool IsJSArrayMap();
+ inline bool IsStringMap();
+ inline bool IsJSProxyMap();
+ inline bool IsJSGlobalProxyMap();
+ inline bool IsJSGlobalObjectMap();
+ inline bool IsGlobalObjectMap();
inline bool CanOmitMapChecks();
@@ -6152,12 +5650,12 @@ class Map: public HeapObject {
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
- static const int kInObjectPropertiesByte = 1;
- static const int kInObjectPropertiesOffset =
- kInstanceSizesOffset + kInObjectPropertiesByte;
- static const int kPreAllocatedPropertyFieldsByte = 2;
- static const int kPreAllocatedPropertyFieldsOffset =
- kInstanceSizesOffset + kPreAllocatedPropertyFieldsByte;
+ static const int kInObjectPropertiesOrConstructorFunctionIndexByte = 1;
+ static const int kInObjectPropertiesOrConstructorFunctionIndexOffset =
+ kInstanceSizesOffset + kInObjectPropertiesOrConstructorFunctionIndexByte;
+ // Note there is one byte available for use here.
+ static const int kUnusedByte = 2;
+ static const int kUnusedOffset = kInstanceSizesOffset + kUnusedByte;
static const int kVisitorIdByte = 3;
static const int kVisitorIdOffset = kInstanceSizesOffset + kVisitorIdByte;
@@ -6175,6 +5673,7 @@ class Map: public HeapObject {
static const int kInstanceTypeAndBitFieldOffset =
kInstanceAttributesOffset + 0;
static const int kBitField2Offset = kInstanceAttributesOffset + 2;
+ static const int kUnusedPropertyFieldsByte = 3;
static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 3;
STATIC_ASSERT(kInstanceTypeAndBitFieldOffset ==
@@ -6694,8 +6193,10 @@ class SharedFunctionInfo: public HeapObject {
DECL_ACCESSORS(instance_class_name, Object)
// [function data]: This field holds some additional data for function.
- // Currently it either has FunctionTemplateInfo to make benefit the API
- // or Smi identifying a builtin function.
+ // Currently it has one of:
+ // - a FunctionTemplateInfo to make benefit the API [IsApiFunction()].
+ // - a Smi identifying a builtin function [HasBuiltinFunctionId()].
+ // - a BytecodeArray for the interpreter [HasBytecodeArray()].
// In the long run we don't want all functions to have this field but
// we can fix that when we have a better model for storing hidden data
// on objects.
@@ -6705,6 +6206,8 @@ class SharedFunctionInfo: public HeapObject {
inline FunctionTemplateInfo* get_api_func_data();
inline bool HasBuiltinFunctionId();
inline BuiltinFunctionId builtin_function_id();
+ inline bool HasBytecodeArray();
+ inline BytecodeArray* bytecode_array();
// [script info]: Script from which the function originates.
DECL_ACCESSORS(script, Object)
@@ -6721,6 +6224,13 @@ class SharedFunctionInfo: public HeapObject {
inline int start_position_and_type() const;
inline void set_start_position_and_type(int value);
+ // The function is subject to debugging if a debug info is attached.
+ inline bool HasDebugInfo();
+ inline DebugInfo* GetDebugInfo();
+
+ // A function has debug code if the compiled code has debug break slots.
+ inline bool HasDebugCode();
+
// [debug info]: Debug information.
DECL_ACCESSORS(debug_info, Object)
@@ -6905,11 +6415,10 @@ class SharedFunctionInfo: public HeapObject {
inline void set_opt_count_and_bailout_reason(int value);
inline int opt_count_and_bailout_reason() const;
- void set_disable_optimization_reason(BailoutReason reason) {
- set_opt_count_and_bailout_reason(
- DisabledOptimizationReasonBits::update(opt_count_and_bailout_reason(),
- reason));
- }
+ inline void set_disable_optimization_reason(BailoutReason reason);
+
+ // Tells whether this function should be subject to debugging.
+ inline bool IsSubjectToDebugging();
// Check whether or not this function is inlineable.
bool IsInlineable();
@@ -6923,7 +6432,7 @@ class SharedFunctionInfo: public HeapObject {
// Calculate the number of in-object properties.
int CalculateInObjectProperties();
- inline bool is_simple_parameter_list();
+ inline bool has_simple_parameters();
// Initialize a SharedFunctionInfo from a parsed function literal.
static void InitFromFunctionLiteral(Handle<SharedFunctionInfo> shared_info,
@@ -7303,11 +6812,8 @@ class JSFunction: public JSObject {
// Tells whether this function is builtin.
inline bool IsBuiltin();
- // Tells whether this function is defined in a native script.
- inline bool IsFromNativeScript();
-
- // Tells whether this function is defined in an extension script.
- inline bool IsFromExtensionScript();
+ // Tells whether this function inlines the given shared function info.
+ bool Inlines(SharedFunctionInfo* candidate);
// Tells whether this function should be subject to debugging.
inline bool IsSubjectToDebugging();
@@ -7441,7 +6947,7 @@ class JSFunction: public JSObject {
// Returns `false` if formal parameters include rest parameters, optional
// parameters, or destructuring parameters.
// TODO(caitp): make this a flag set during parsing
- inline bool is_simple_parameter_list();
+ inline bool has_simple_parameters();
// [next_function_link]: Links functions into various lists, e.g. the list
// of optimized functions hanging off the native_context. The CodeFlusher
@@ -8241,56 +7747,25 @@ class AllocationSite: public Struct {
void ResetPretenureDecision();
- PretenureDecision pretenure_decision() {
- int value = pretenure_data()->value();
- return PretenureDecisionBits::decode(value);
- }
+ inline PretenureDecision pretenure_decision();
+ inline void set_pretenure_decision(PretenureDecision decision);
- void set_pretenure_decision(PretenureDecision decision) {
- int value = pretenure_data()->value();
- set_pretenure_data(
- Smi::FromInt(PretenureDecisionBits::update(value, decision)),
- SKIP_WRITE_BARRIER);
- }
-
- bool deopt_dependent_code() {
- int value = pretenure_data()->value();
- return DeoptDependentCodeBit::decode(value);
- }
-
- void set_deopt_dependent_code(bool deopt) {
- int value = pretenure_data()->value();
- set_pretenure_data(
- Smi::FromInt(DeoptDependentCodeBit::update(value, deopt)),
- SKIP_WRITE_BARRIER);
- }
-
- int memento_found_count() {
- int value = pretenure_data()->value();
- return MementoFoundCountBits::decode(value);
- }
+ inline bool deopt_dependent_code();
+ inline void set_deopt_dependent_code(bool deopt);
+ inline int memento_found_count();
inline void set_memento_found_count(int count);
- int memento_create_count() {
- return pretenure_create_count()->value();
- }
-
- void set_memento_create_count(int count) {
- set_pretenure_create_count(Smi::FromInt(count), SKIP_WRITE_BARRIER);
- }
+ inline int memento_create_count();
+ inline void set_memento_create_count(int count);
// The pretenuring decision is made during gc, and the zombie state allows
// us to recognize when an allocation site is just being kept alive because
// a later traversal of new space may discover AllocationMementos that point
// to this AllocationSite.
- bool IsZombie() {
- return pretenure_decision() == kZombie;
- }
+ inline bool IsZombie();
- bool IsMaybeTenure() {
- return pretenure_decision() == kMaybeTenure;
- }
+ inline bool IsMaybeTenure();
inline void MarkZombie();
@@ -8300,35 +7775,13 @@ class AllocationSite: public Struct {
inline bool DigestPretenuringFeedback(bool maximum_size_scavenge);
- ElementsKind GetElementsKind() {
- DCHECK(!SitePointsToLiteral());
- int value = Smi::cast(transition_info())->value();
- return ElementsKindBits::decode(value);
- }
-
- void SetElementsKind(ElementsKind kind) {
- int value = Smi::cast(transition_info())->value();
- set_transition_info(Smi::FromInt(ElementsKindBits::update(value, kind)),
- SKIP_WRITE_BARRIER);
- }
-
- bool CanInlineCall() {
- int value = Smi::cast(transition_info())->value();
- return DoNotInlineBit::decode(value) == 0;
- }
+ inline ElementsKind GetElementsKind();
+ inline void SetElementsKind(ElementsKind kind);
- void SetDoNotInlineCall() {
- int value = Smi::cast(transition_info())->value();
- set_transition_info(Smi::FromInt(DoNotInlineBit::update(value, true)),
- SKIP_WRITE_BARRIER);
- }
+ inline bool CanInlineCall();
+ inline void SetDoNotInlineCall();
- bool SitePointsToLiteral() {
- // If transition_info is a smi, then it represents an ElementsKind
- // for a constructed array. Otherwise, it must be a boilerplate
- // for an object or array literal.
- return transition_info()->IsJSArray() || transition_info()->IsJSObject();
- }
+ inline bool SitePointsToLiteral();
static void DigestTransitionFeedback(Handle<AllocationSite> site,
ElementsKind to_kind);
@@ -8363,9 +7816,7 @@ class AllocationSite: public Struct {
kSize> BodyDescriptor;
private:
- bool PretenuringDecisionMade() {
- return pretenure_decision() != kUndecided;
- }
+ inline bool PretenuringDecisionMade();
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSite);
};
@@ -8378,14 +7829,8 @@ class AllocationMemento: public Struct {
DECL_ACCESSORS(allocation_site, Object)
- bool IsValid() {
- return allocation_site()->IsAllocationSite() &&
- !AllocationSite::cast(allocation_site())->IsZombie();
- }
- AllocationSite* GetAllocationSite() {
- DCHECK(IsValid());
- return AllocationSite::cast(allocation_site());
- }
+ inline bool IsValid();
+ inline AllocationSite* GetAllocationSite();
DECLARE_PRINTER(AllocationMemento)
DECLARE_VERIFIER(AllocationMemento)
@@ -8494,8 +7939,7 @@ class IteratingStringHasher : public StringHasher {
inline void VisitTwoByteString(const uint16_t* chars, int length);
private:
- inline IteratingStringHasher(int len, uint32_t seed)
- : StringHasher(len, seed) {}
+ inline IteratingStringHasher(int len, uint32_t seed);
void VisitConsString(ConsString* cons_string);
DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
};
@@ -8736,10 +8180,8 @@ class String: public Name {
class SubStringRange {
public:
- explicit SubStringRange(String* string, int first = 0, int length = -1)
- : string_(string),
- first_(first),
- length_(length == -1 ? string->length() : length) {}
+ explicit inline SubStringRange(String* string, int first = 0,
+ int length = -1);
class iterator;
inline iterator begin();
inline iterator end();
@@ -8886,12 +8328,11 @@ class String: public Name {
// ROBUST_STRING_TRAVERSAL invokes behaviour that is robust This means it
// handles unexpected data without causing assert failures and it does not
// do any heap allocations. This is useful when printing stack traces.
- SmartArrayPointer<char> ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robustness_flag,
- int offset,
- int length,
- int* length_output = 0);
- SmartArrayPointer<char> ToCString(
+ base::SmartArrayPointer<char> ToCString(AllowNullsFlag allow_nulls,
+ RobustnessFlag robustness_flag,
+ int offset, int length,
+ int* length_output = 0);
+ base::SmartArrayPointer<char> ToCString(
AllowNullsFlag allow_nulls = DISALLOW_NULLS,
RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
int* length_output = 0);
@@ -8902,7 +8343,7 @@ class String: public Name {
// ROBUST_STRING_TRAVERSAL invokes behaviour that is robust This means it
// handles unexpected data without causing assert failures and it does not
// do any heap allocations. This is useful when printing stack traces.
- SmartArrayPointer<uc16> ToWideCString(
+ base::SmartArrayPointer<uc16> ToWideCString(
RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL);
bool ComputeArrayIndex(uint32_t* index);
@@ -9498,6 +8939,9 @@ class Oddball: public HeapObject {
// [to_number]: Cached to_number computed at startup.
DECL_ACCESSORS(to_number, Object)
+ // [typeof]: Cached type_of computed at startup.
+ DECL_ACCESSORS(type_of, String)
+
inline byte kind() const;
inline void set_kind(byte kind);
@@ -9507,16 +8951,15 @@ class Oddball: public HeapObject {
DECLARE_VERIFIER(Oddball)
// Initialize the fields.
- static void Initialize(Isolate* isolate,
- Handle<Oddball> oddball,
- const char* to_string,
- Handle<Object> to_number,
- byte kind);
+ static void Initialize(Isolate* isolate, Handle<Oddball> oddball,
+ const char* to_string, Handle<Object> to_number,
+ const char* type_of, byte kind);
// Layout description.
static const int kToStringOffset = HeapObject::kHeaderSize;
static const int kToNumberOffset = kToStringOffset + kPointerSize;
- static const int kKindOffset = kToNumberOffset + kPointerSize;
+ static const int kTypeOfOffset = kToNumberOffset + kPointerSize;
+ static const int kKindOffset = kTypeOfOffset + kPointerSize;
static const int kSize = kKindOffset + kPointerSize;
static const byte kFalse = 0;
@@ -9530,8 +8973,7 @@ class Oddball: public HeapObject {
static const byte kOther = 7;
static const byte kException = 8;
- typedef FixedBodyDescriptor<kToStringOffset,
- kToNumberOffset + kPointerSize,
+ typedef FixedBodyDescriptor<kToStringOffset, kTypeOfOffset + kPointerSize,
kSize> BodyDescriptor;
STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset);
@@ -9586,13 +9028,8 @@ class PropertyCell : public HeapObject {
// property.
DECL_ACCESSORS(dependent_code, DependentCode)
- PropertyDetails property_details() {
- return PropertyDetails(Smi::cast(property_details_raw()));
- }
-
- void set_property_details(PropertyDetails details) {
- set_property_details_raw(details.AsSmi());
- }
+ inline PropertyDetails property_details();
+ inline void set_property_details(PropertyDetails details);
PropertyCellConstantType GetConstantType();
@@ -10014,14 +9451,9 @@ class JSArrayBuffer: public JSObject {
DECLARE_PRINTER(JSArrayBuffer)
DECLARE_VERIFIER(JSArrayBuffer)
- static const int kByteLengthOffset = JSObject::kHeaderSize;
-
- // NOTE: GC will visit objects fields:
- // 1. From JSObject::BodyDescriptor::kStartOffset to kByteLengthOffset +
- // kPointerSize
- // 2. From start of the internal fields and up to the end of them
- static const int kBackingStoreOffset = kByteLengthOffset + kPointerSize;
- static const int kBitFieldSlot = kBackingStoreOffset + kPointerSize;
+ static const int kBackingStoreOffset = JSObject::kHeaderSize;
+ static const int kByteLengthOffset = kBackingStoreOffset + kPointerSize;
+ static const int kBitFieldSlot = kByteLengthOffset + kPointerSize;
#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
static const int kBitFieldOffset = kBitFieldSlot;
#else
@@ -10032,12 +9464,6 @@ class JSArrayBuffer: public JSObject {
static const int kSizeWithInternalFields =
kSize + v8::ArrayBuffer::kInternalFieldCount * kPointerSize;
- template <typename StaticVisitor>
- static inline void JSArrayBufferIterateBody(Heap* heap, HeapObject* obj);
-
- static inline void JSArrayBufferIterateBody(HeapObject* obj,
- ObjectVisitor* v);
-
class IsExternal : public BitField<bool, 1, 1> {};
class IsNeuterable : public BitField<bool, 2, 1> {};
class WasNeutered : public BitField<bool, 3, 1> {};
@@ -10286,9 +9712,8 @@ class AccessorInfo: public Struct {
static const int kSize = kExpectedReceiverTypeOffset + kPointerSize;
private:
- inline bool HasExpectedReceiverType() {
- return expected_receiver_type()->IsFunctionTemplateInfo();
- }
+ inline bool HasExpectedReceiverType();
+
// Bit positions in flag.
static const int kAllCanReadBit = 0;
static const int kAllCanWriteBit = 1;
@@ -10347,38 +9772,19 @@ class AccessorPair: public Struct {
static Handle<AccessorPair> Copy(Handle<AccessorPair> pair);
- Object* get(AccessorComponent component) {
- return component == ACCESSOR_GETTER ? getter() : setter();
- }
-
- void set(AccessorComponent component, Object* value) {
- if (component == ACCESSOR_GETTER) {
- set_getter(value);
- } else {
- set_setter(value);
- }
- }
+ inline Object* get(AccessorComponent component);
+ inline void set(AccessorComponent component, Object* value);
// Note: Returns undefined instead in case of a hole.
Object* GetComponent(AccessorComponent component);
// Set both components, skipping arguments which are a JavaScript null.
- void SetComponents(Object* getter, Object* setter) {
- if (!getter->IsNull()) set_getter(getter);
- if (!setter->IsNull()) set_setter(setter);
- }
+ inline void SetComponents(Object* getter, Object* setter);
- bool Equals(AccessorPair* pair) {
- return (this == pair) || pair->Equals(getter(), setter());
- }
-
- bool Equals(Object* getter_value, Object* setter_value) {
- return (getter() == getter_value) && (setter() == setter_value);
- }
+ inline bool Equals(AccessorPair* pair);
+ inline bool Equals(Object* getter_value, Object* setter_value);
- bool ContainsAccessor() {
- return IsJSAccessor(getter()) || IsJSAccessor(setter());
- }
+ inline bool ContainsAccessor();
// Dispatched behavior.
DECLARE_PRINTER(AccessorPair)
@@ -10394,9 +9800,7 @@ class AccessorPair: public Struct {
// var obj = {};
// Object.defineProperty(obj, "foo", {get: undefined});
// assertTrue("foo" in obj);
- bool IsJSAccessor(Object* obj) {
- return obj->IsSpecFunction() || obj->IsUndefined();
- }
+ inline bool IsJSAccessor(Object* obj);
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorPair);
};
@@ -10624,8 +10028,6 @@ class DebugInfo: public Struct {
public:
// The shared function info for the source being debugged.
DECL_ACCESSORS(shared, SharedFunctionInfo)
- // Code object for the original code.
- DECL_ACCESSORS(original_code, Code)
// Code object for the patched code. This code object is the code object
// currently active for the function.
DECL_ACCESSORS(code, Code)
@@ -10659,12 +10061,8 @@ class DebugInfo: public Struct {
DECLARE_VERIFIER(DebugInfo)
static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
- static const int kOriginalCodeIndex = kSharedFunctionInfoIndex + kPointerSize;
- static const int kPatchedCodeIndex = kOriginalCodeIndex + kPointerSize;
- static const int kActiveBreakPointsCountIndex =
- kPatchedCodeIndex + kPointerSize;
- static const int kBreakPointsStateIndex =
- kActiveBreakPointsCountIndex + kPointerSize;
+ static const int kCodeIndex = kSharedFunctionInfoIndex + kPointerSize;
+ static const int kBreakPointsStateIndex = kCodeIndex + kPointerSize;
static const int kSize = kBreakPointsStateIndex + kPointerSize;
static const int kEstimatedNofBreakPointsInFunction = 16;
@@ -10831,9 +10229,7 @@ class ObjectVisitor BASE_EMBEDDED {
class StructBodyDescriptor : public
FlexibleBodyDescriptor<HeapObject::kHeaderSize> {
public:
- static inline int SizeOf(Map* map, HeapObject* object) {
- return map->instance_size();
- }
+ static inline int SizeOf(Map* map, HeapObject* object);
};
diff --git a/deps/v8/src/optimizing-compile-dispatcher.cc b/deps/v8/src/optimizing-compile-dispatcher.cc
index f5c57cd1cf..1f98e7bc95 100644
--- a/deps/v8/src/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/optimizing-compile-dispatcher.cc
@@ -7,7 +7,7 @@
#include "src/v8.h"
#include "src/base/atomicops.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/hydrogen.h"
#include "src/isolate.h"
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index aa0ec104a7..ecc6530135 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/parser.h"
#include "src/api.h"
#include "src/ast.h"
@@ -14,7 +14,6 @@
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/messages.h"
-#include "src/parser.h"
#include "src/preparser.h"
#include "src/runtime/runtime.h"
#include "src/scanner-character-streams.h"
@@ -346,7 +345,7 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
: FunctionKind::kDefaultBaseConstructor;
Scope* function_scope = NewScope(scope, FUNCTION_SCOPE, kind);
function_scope->SetLanguageMode(
- static_cast<LanguageMode>(language_mode | STRICT_BIT));
+ static_cast<LanguageMode>(language_mode | STRICT));
// Set start and end position to the same value
function_scope->set_start_position(pos);
function_scope->set_end_position(pos);
@@ -789,9 +788,11 @@ Expression* ParserTraits::NewTargetExpression(Scope* scope,
AstNodeFactory* factory,
int pos) {
static const int kNewTargetStringLength = 10;
- return scope->NewUnresolved(
+ auto proxy = scope->NewUnresolved(
factory, parser_->ast_value_factory()->new_target_string(),
Variable::NORMAL, pos, pos + kNewTargetStringLength);
+ proxy->set_is_new_target();
+ return proxy;
}
@@ -873,12 +874,12 @@ Expression* ParserTraits::ParseV8Intrinsic(bool* ok) {
FunctionLiteral* ParserTraits::ParseFunctionLiteral(
const AstRawString* name, Scanner::Location function_name_location,
- bool name_is_strict_reserved, FunctionKind kind,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok) {
return parser_->ParseFunctionLiteral(
- name, function_name_location, name_is_strict_reserved, kind,
+ name, function_name_location, function_name_validity, kind,
function_token_position, type, arity_restriction, language_mode, ok);
}
@@ -910,13 +911,12 @@ Parser::Parser(ParseInfo* info)
DCHECK(!info->script().is_null() || info->source_stream() != NULL);
set_allow_lazy(info->allow_lazy_parsing());
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
- set_allow_harmony_modules(!info->is_native() && FLAG_harmony_modules);
set_allow_harmony_arrow_functions(FLAG_harmony_arrow_functions);
set_allow_harmony_sloppy(FLAG_harmony_sloppy);
- set_allow_harmony_unicode(FLAG_harmony_unicode);
- set_allow_harmony_computed_property_names(
- FLAG_harmony_computed_property_names);
- set_allow_harmony_rest_params(FLAG_harmony_rest_parameters);
+ set_allow_harmony_sloppy_function(FLAG_harmony_sloppy_function);
+ set_allow_harmony_sloppy_let(FLAG_harmony_sloppy_let);
+ set_allow_harmony_rest_parameters(FLAG_harmony_rest_parameters);
+ set_allow_harmony_default_parameters(FLAG_harmony_default_parameters);
set_allow_harmony_spreadcalls(FLAG_harmony_spreadcalls);
set_allow_harmony_destructuring(FLAG_harmony_destructuring);
set_allow_harmony_spread_arrays(FLAG_harmony_spread_arrays);
@@ -989,7 +989,7 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
PrintF("[parsing eval");
} else if (info->script()->name()->IsString()) {
String* name = String::cast(info->script()->name());
- SmartArrayPointer<char> name_chars = name->ToCString();
+ base::SmartArrayPointer<char> name_chars = name->ToCString();
PrintF("[parsing script: %s", name_chars.get());
} else {
PrintF("[parsing script");
@@ -1016,7 +1016,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
FunctionLiteral* result = NULL;
{
- // TODO(wingo): Add an outer GLOBAL_SCOPE corresponding to the native
+ // TODO(wingo): Add an outer SCRIPT_SCOPE corresponding to the native
// context, which will have the "this" binding for script scopes.
Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
info->set_script_scope(scope);
@@ -1053,7 +1053,6 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
bool ok = true;
int beg_pos = scanner()->location().beg_pos;
if (info->is_module()) {
- DCHECK(allow_harmony_modules());
ParseModuleItemList(body, &ok);
} else {
ParseStatementList(body, Token::EOS, &ok);
@@ -1065,6 +1064,8 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
if (ok && is_strict(language_mode())) {
CheckStrictOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
+ }
+ if (ok && (is_strict(language_mode()) || allow_harmony_sloppy())) {
CheckConflictingVarDeclarations(scope_, &ok);
}
@@ -1128,7 +1129,8 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info) {
if (FLAG_trace_parse && result != NULL) {
double ms = timer.Elapsed().InMillisecondsF();
- SmartArrayPointer<char> name_chars = result->debug_name()->ToCString();
+ base::SmartArrayPointer<char> name_chars =
+ result->debug_name()->ToCString();
PrintF("[parsing function: %s - took %0.3f ms]\n", name_chars.get(), ms);
}
return result;
@@ -1184,7 +1186,7 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
scope->SetLanguageMode(shared_info->language_mode());
scope->set_start_position(shared_info->start_position());
ExpressionClassifier formals_classifier;
- ParserFormalParameterParsingState parsing_state(scope);
+ ParserFormalParameters formals(scope);
Checkpoint checkpoint(this);
{
// Parsing patterns as variable reference expression creates
@@ -1193,20 +1195,23 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
BlockState block_state(&scope_, scope);
if (Check(Token::LPAREN)) {
// '(' StrictFormalParameters ')'
- ParseFormalParameterList(&parsing_state, &formals_classifier, &ok);
+ ParseFormalParameterList(&formals, &formals_classifier, &ok);
if (ok) ok = Check(Token::RPAREN);
} else {
// BindingIdentifier
- const bool is_rest = false;
- ParseFormalParameter(is_rest, &parsing_state, &formals_classifier,
- &ok);
+ ParseFormalParameter(&formals, &formals_classifier, &ok);
+ if (ok) {
+ DeclareFormalParameter(
+ formals.scope, formals.at(0), formals.is_simple,
+ &formals_classifier);
+ }
}
}
if (ok) {
- checkpoint.Restore(&parsing_state.materialized_literals_count);
+ checkpoint.Restore(&formals.materialized_literals_count);
Expression* expression =
- ParseArrowFunctionLiteral(parsing_state, formals_classifier, &ok);
+ ParseArrowFunctionLiteral(formals, formals_classifier, &ok);
if (ok) {
// Scanning must end at the same position that was recorded
// previously. If not, parsing has been interrupted due to a stack
@@ -1231,9 +1236,9 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
shared_info->language_mode());
} else {
result = ParseFunctionLiteral(
- raw_name, Scanner::Location::invalid(), false, shared_info->kind(),
- RelocInfo::kNoPosition, function_type, FunctionLiteral::NORMAL_ARITY,
- shared_info->language_mode(), &ok);
+ raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck,
+ shared_info->kind(), RelocInfo::kNoPosition, function_type,
+ FunctionLiteral::NORMAL_ARITY, shared_info->language_mode(), &ok);
}
// Make sure the results agree.
DCHECK(ok == (result != NULL));
@@ -1324,20 +1329,17 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
// / "use strong" directives, do the strict mode changes only once.
if (is_sloppy(scope_->language_mode())) {
scope_->SetLanguageMode(static_cast<LanguageMode>(
- scope_->language_mode() | STRICT_BIT));
+ scope_->language_mode() | STRICT));
}
if (use_strong_found) {
scope_->SetLanguageMode(static_cast<LanguageMode>(
- scope_->language_mode() | STRONG_BIT));
+ scope_->language_mode() | STRONG));
}
// Because declarations in strict eval code don't leak into the scope
// of the eval call, it is likely that functions declared in strict
// eval code will be used within the eval code, so lazy parsing is
- // probably not a win. Also, resolution of "var" bindings defined in
- // strict eval code from within nested functions is currently broken
- // with the pre-parser; lazy parsing of strict eval code causes
- // regress/regress-crbug-135066.js to fail.
+ // probably not a win.
if (scope_->is_eval_scope()) mode_ = PARSE_EAGERLY;
} else if (literal->raw_value()->AsString() ==
ast_value_factory()->use_asm_string() &&
@@ -1390,7 +1392,7 @@ Statement* Parser::ParseStatementListItem(bool* ok) {
case Token::VAR:
return ParseVariableStatement(kStatementListItem, NULL, ok);
case Token::LET:
- if (is_strict(language_mode())) {
+ if (allow_let()) {
return ParseVariableStatement(kStatementListItem, NULL, ok);
}
break;
@@ -1429,7 +1431,7 @@ void* Parser::ParseModuleItemList(ZoneList<Statement*>* body, bool* ok) {
DCHECK(scope_->is_module_scope());
scope_->SetLanguageMode(
- static_cast<LanguageMode>(scope_->language_mode() | STRICT_BIT));
+ static_cast<LanguageMode>(scope_->language_mode() | STRICT));
while (peek() != Token::EOS) {
Statement* stat = ParseModuleItem(CHECK_OK);
@@ -2008,15 +2010,17 @@ Variable* Parser::Declare(Declaration* declaration,
// variable and also set its mode. In any case, a Declaration node
// will be added to the scope so that the declaration can be added
// to the corresponding activation frame at runtime if necessary.
- // For instance declarations inside an eval scope need to be added
- // to the calling function context.
- // Similarly, strict mode eval scope does not leak variable declarations to
- // the caller's scope so we declare all locals, too.
+ // For instance, var declarations inside a sloppy eval scope need
+ // to be added to the calling function context. Similarly, strict
+ // mode eval scope and lexical eval bindings do not leak variable
+ // declarations to the caller's scope so we declare all locals, too.
if (declaration_scope->is_function_scope() ||
- declaration_scope->is_strict_eval_scope() ||
declaration_scope->is_block_scope() ||
declaration_scope->is_module_scope() ||
- declaration_scope->is_script_scope()) {
+ declaration_scope->is_script_scope() ||
+ (declaration_scope->is_eval_scope() &&
+ (is_strict(declaration_scope->language_mode()) ||
+ IsLexicalVariableMode(mode)))) {
// Declare the variable in the declaration scope.
var = declaration_scope->LookupLocal(name);
if (var == NULL) {
@@ -2052,13 +2056,13 @@ Variable* Parser::Declare(Declaration* declaration,
// because the var declaration is hoisted to the function scope where 'x'
// is already bound.
DCHECK(IsDeclaredVariableMode(var->mode()));
- if (is_strict(language_mode())) {
+ if (is_strict(language_mode()) || allow_harmony_sloppy()) {
// In harmony we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
if (declaration_kind == DeclarationDescriptor::NORMAL) {
ParserTraits::ReportMessage(MessageTemplate::kVarRedeclaration, name);
} else {
- ParserTraits::ReportMessage(MessageTemplate::kStrictParamDupe);
+ ParserTraits::ReportMessage(MessageTemplate::kParamDupe);
}
*ok = false;
return nullptr;
@@ -2069,8 +2073,22 @@ Variable* Parser::Declare(Declaration* declaration,
} else if (mode == VAR) {
var->set_maybe_assigned();
}
+ } else if (declaration_scope->is_eval_scope() &&
+ is_sloppy(declaration_scope->language_mode()) &&
+ !IsLexicalVariableMode(mode)) {
+ // In a var binding in a sloppy direct eval, pollute the enclosing scope
+ // with this new binding by doing the following:
+ // The proxy is bound to a lookup variable to force a dynamic declaration
+ // using the DeclareLookupSlot runtime function.
+ Variable::Kind kind = Variable::NORMAL;
+ // TODO(sigurds) figure out if kNotAssigned is OK here
+ var = new (zone()) Variable(declaration_scope, name, mode, kind,
+ declaration->initialization(), kNotAssigned);
+ var->AllocateTo(VariableLocation::LOOKUP, -1);
+ resolve = true;
}
+
// We add a declaration node for every declaration. The compiler
// will only generate code if necessary. In particular, declarations
// for inner local variables that do not represent functions won't
@@ -2095,17 +2113,6 @@ Variable* Parser::Declare(Declaration* declaration,
Variable::Kind kind = Variable::NORMAL;
var = new (zone()) Variable(declaration_scope, name, mode, kind,
kNeedsInitialization, kNotAssigned);
- } else if (declaration_scope->is_eval_scope() &&
- is_sloppy(declaration_scope->language_mode())) {
- // For variable declarations in a sloppy eval scope the proxy is bound
- // to a lookup variable to force a dynamic declaration using the
- // DeclareLookupSlot runtime function.
- Variable::Kind kind = Variable::NORMAL;
- // TODO(sigurds) figure out if kNotAssigned is OK here
- var = new (zone()) Variable(declaration_scope, name, mode, kind,
- declaration->initialization(), kNotAssigned);
- var->AllocateTo(VariableLocation::LOOKUP, -1);
- resolve = true;
}
// If requested and we have a local variable, bind the proxy to the variable
@@ -2196,12 +2203,21 @@ Statement* Parser::ParseFunctionDeclaration(
bool is_strict_reserved = false;
const AstRawString* name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
+
+ if (fni_ != NULL) {
+ fni_->Enter();
+ fni_->PushEnclosingName(name);
+ }
FunctionLiteral* fun = ParseFunctionLiteral(
- name, scanner()->location(), is_strict_reserved,
+ name, scanner()->location(),
+ is_strict_reserved ? kFunctionNameIsStrictReserved
+ : kFunctionNameValidityUnknown,
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
pos, FunctionLiteral::DECLARATION, FunctionLiteral::NORMAL_ARITY,
language_mode(), CHECK_OK);
+ if (fni_ != NULL) fni_->Leave();
+
// Even if we're not at the top-level of the global or a function
// scope, we treat it as such and introduce the function with its
// initial value upon entering the corresponding scope.
@@ -2210,9 +2226,8 @@ Statement* Parser::ParseFunctionDeclaration(
VariableMode mode =
is_strong(language_mode())
? CONST
- : is_strict(language_mode()) &&
- !(scope_->is_script_scope() || scope_->is_eval_scope() ||
- scope_->is_function_scope())
+ : (is_strict(language_mode()) || allow_harmony_sloppy_function()) &&
+ !scope_->is_declaration_scope()
? LET
: VAR;
VariableProxy* proxy = NewUnresolved(name, mode);
@@ -2290,7 +2305,7 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
- if (is_strict(language_mode())) {
+ if (is_strict(language_mode()) || allow_harmony_sloppy()) {
return ParseScopedBlock(labels, ok);
}
@@ -2442,14 +2457,14 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
parsing_result->descriptor.init_op = Token::INIT_CONST_LEGACY;
++use_counts_[v8::Isolate::kLegacyConst];
} else {
- DCHECK(is_strict(language_mode()));
+ DCHECK(is_strict(language_mode()) || allow_harmony_sloppy());
DCHECK(var_context != kStatement);
parsing_result->descriptor.mode = CONST;
parsing_result->descriptor.init_op = Token::INIT_CONST;
}
parsing_result->descriptor.is_const = true;
parsing_result->descriptor.needs_init = true;
- } else if (peek() == Token::LET && is_strict(language_mode())) {
+ } else if (peek() == Token::LET && allow_let()) {
Consume(Token::LET);
DCHECK(var_context != kStatement);
parsing_result->descriptor.mode = LET;
@@ -2823,7 +2838,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
//
// return (temp = expr) === undefined ? this :
// %_IsSpecObject(temp) ? temp : throw new TypeError(...);
- Variable* temp = scope_->DeclarationScope()->NewTemporary(
+ Variable* temp = scope_->NewTemporary(
ast_value_factory()->empty_string());
Assignment* assign = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(temp), return_value, pos);
@@ -3177,9 +3192,9 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
ForOfStatement* for_of = stmt->AsForOfStatement();
if (for_of != NULL) {
- Variable* iterator = scope_->DeclarationScope()->NewTemporary(
+ Variable* iterator = scope_->NewTemporary(
ast_value_factory()->dot_iterator_string());
- Variable* result = scope_->DeclarationScope()->NewTemporary(
+ Variable* result = scope_->NewTemporary(
ast_value_factory()->dot_result_string());
Expression* assign_iterator;
@@ -3295,7 +3310,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// make statement: temp_x = x.
for (int i = 0; i < names->length(); i++) {
VariableProxy* proxy = NewUnresolved(names->at(i), LET);
- Variable* temp = scope_->DeclarationScope()->NewTemporary(temp_name);
+ Variable* temp = scope_->NewTemporary(temp_name);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
Assignment* assignment = factory()->NewAssignment(
Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
@@ -3308,7 +3323,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Variable* first = NULL;
// Make statement: first = 1.
if (next) {
- first = scope_->DeclarationScope()->NewTemporary(temp_name);
+ first = scope_->NewTemporary(temp_name);
VariableProxy* first_proxy = factory()->NewVariableProxy(first);
Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
Assignment* assignment = factory()->NewAssignment(
@@ -3388,7 +3403,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
ignore_completion_block->AddStatement(clear_first_or_next, zone());
}
- Variable* flag = scope_->DeclarationScope()->NewTemporary(temp_name);
+ Variable* flag = scope_->NewTemporary(temp_name);
// Make statement: flag = 1.
{
VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
@@ -3501,7 +3516,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
DeclarationParsingResult parsing_result;
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
- (peek() == Token::LET && is_strict(language_mode()))) {
+ (peek() == Token::LET && allow_let())) {
ParseVariableDeclarations(kForStatement, &parsing_result, CHECK_OK);
is_const = parsing_result.descriptor.mode == CONST;
@@ -3574,7 +3589,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// let x; // for TDZ
// }
- Variable* temp = scope_->DeclarationScope()->NewTemporary(
+ Variable* temp = scope_->NewTemporary(
ast_value_factory()->dot_for_string());
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, stmt_pos);
@@ -3665,8 +3680,9 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
CHECK_OK);
}
} else {
- Scanner::Location lhs_location = scanner()->peek_location();
+ int lhs_beg_pos = peek_position();
Expression* expression = ParseExpression(false, CHECK_OK);
+ int lhs_end_pos = scanner()->location().end_pos;
ForEachStatement::VisitMode mode;
bool accept_OF = expression->IsVariableProxy();
is_let_identifier_expression =
@@ -3677,8 +3693,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
if (CheckInOrOf(accept_OF, &mode, ok)) {
if (!*ok) return nullptr;
expression = this->CheckAndRewriteReferenceExpression(
- expression, lhs_location, MessageTemplate::kInvalidLhsInFor,
- CHECK_OK);
+ expression, lhs_beg_pos, lhs_end_pos,
+ MessageTemplate::kInvalidLhsInFor, kSyntaxError, CHECK_OK);
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, stmt_pos);
@@ -3697,8 +3713,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
return loop;
} else {
- init =
- factory()->NewExpressionStatement(expression, lhs_location.beg_pos);
+ init = factory()->NewExpressionStatement(expression, lhs_beg_pos);
}
}
}
@@ -3839,10 +3854,10 @@ Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
void ParserTraits::ParseArrowFunctionFormalParameters(
- ParserFormalParameterParsingState* parsing_state, Expression* expr,
- const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
- bool* ok) {
- if (parsing_state->scope->num_parameters() >= Code::kMaxArguments) {
+ ParserFormalParameters* parameters, Expression* expr,
+ const Scanner::Location& params_loc,
+ Scanner::Location* duplicate_loc, bool* ok) {
+ if (parameters->Arity() >= Code::kMaxArguments) {
ReportMessageAt(params_loc, MessageTemplate::kMalformedArrowFunParamList);
*ok = false;
return;
@@ -3868,7 +3883,7 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
DCHECK_EQ(binop->op(), Token::COMMA);
Expression* left = binop->left();
Expression* right = binop->right();
- ParseArrowFunctionFormalParameters(parsing_state, left, params_loc,
+ ParseArrowFunctionFormalParameters(parameters, left, params_loc,
duplicate_loc, ok);
if (!*ok) return;
// LHS of comma expression should be unparenthesized.
@@ -3876,12 +3891,15 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
}
// Only the right-most expression may be a rest parameter.
- DCHECK(!parsing_state->has_rest);
+ DCHECK(!parameters->has_rest);
- bool is_rest = false;
- if (expr->IsSpread()) {
- is_rest = true;
+ bool is_rest = expr->IsSpread();
+ if (is_rest) {
expr = expr->AsSpread()->expression();
+ parameters->has_rest = true;
+ }
+ if (parameters->is_simple) {
+ parameters->is_simple = !is_rest && expr->IsVariableProxy();
}
if (expr->IsVariableProxy()) {
@@ -3893,20 +3911,48 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
parser_->scope_->RemoveUnresolved(expr->AsVariableProxy());
}
- ExpressionClassifier classifier;
- DeclareFormalParameter(parsing_state, expr, &classifier, is_rest);
- if (!duplicate_loc->IsValid()) {
- *duplicate_loc = classifier.duplicate_formal_parameter_error().location;
+ Expression* initializer = nullptr;
+ if (!is_rest && parser_->allow_harmony_default_parameters() &&
+ parser_->Check(Token::ASSIGN)) {
+ ExpressionClassifier init_classifier;
+ initializer =
+ parser_->ParseAssignmentExpression(true, &init_classifier, ok);
+ if (!*ok) return;
+ parser_->ValidateExpression(&init_classifier, ok);
+ if (!*ok) return;
+ parameters->is_simple = false;
+ }
+
+ AddFormalParameter(parameters, expr, initializer, is_rest);
+}
+
+
+void ParserTraits::ParseArrowFunctionFormalParameterList(
+ ParserFormalParameters* parameters, Expression* expr,
+ const Scanner::Location& params_loc,
+ Scanner::Location* duplicate_loc, bool* ok) {
+ ParseArrowFunctionFormalParameters(parameters, expr, params_loc,
+ duplicate_loc, ok);
+ if (!*ok) return;
+
+ for (int i = 0; i < parameters->Arity(); ++i) {
+ auto parameter = parameters->at(i);
+ ExpressionClassifier classifier;
+ DeclareFormalParameter(
+ parameters->scope, parameter, parameters->is_simple, &classifier);
+ if (!duplicate_loc->IsValid()) {
+ *duplicate_loc = classifier.duplicate_formal_parameter_error().location;
+ }
}
+ DCHECK_EQ(parameters->is_simple, parameters->scope->has_simple_parameters());
}
-void ParserTraits::ReindexLiterals(
- const ParserFormalParameterParsingState& parsing_state) {
+void ParserTraits::ReindexLiterals(const ParserFormalParameters& parameters) {
if (parser_->function_state_->materialized_literal_count() > 0) {
AstLiteralReindexer reindexer;
- for (const auto p : parsing_state.params) {
+ for (const auto p : parameters.params) {
if (p.pattern != nullptr) reindexer.Reindex(p.pattern);
}
DCHECK(reindexer.count() <=
@@ -3917,8 +3963,8 @@ void ParserTraits::ReindexLiterals(
FunctionLiteral* Parser::ParseFunctionLiteral(
const AstRawString* function_name, Scanner::Location function_name_location,
- bool name_is_strict_reserved, FunctionKind kind, int function_token_pos,
- FunctionLiteral::FunctionType function_type,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
+ int function_token_pos, FunctionLiteral::FunctionType function_type,
FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok) {
// Function ::
@@ -3945,7 +3991,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_name = ast_value_factory()->empty_string();
}
- int num_parameters = 0;
// Function declarations are function scoped in normal mode, so they are
// hoisted. In harmony block scoping mode they are block scoped, so they
// are not hoisted.
@@ -3976,13 +4021,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Scope* declaration_scope = scope_->DeclarationScope();
Scope* original_declaration_scope = original_scope_->DeclarationScope();
Scope* scope = function_type == FunctionLiteral::DECLARATION &&
- is_sloppy(language_mode) &&
+ is_sloppy(language_mode) && !allow_harmony_sloppy() &&
(original_scope_ == original_declaration_scope ||
declaration_scope != original_declaration_scope)
? NewScope(declaration_scope, FUNCTION_SCOPE, kind)
: NewScope(scope_, FUNCTION_SCOPE, kind);
scope->SetLanguageMode(language_mode);
ZoneList<Statement*>* body = NULL;
+ int arity = -1;
int materialized_literal_count = -1;
int expected_property_count = -1;
DuplicateFinder duplicate_finder(scanner()->unicode_cache());
@@ -3991,7 +4037,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
parenthesized_function_ ? FunctionLiteral::kShouldEagerCompile
: FunctionLiteral::kShouldLazyCompile;
bool should_be_used_once_hint = false;
- // Parse function body.
+ // Parse function.
{
AstNodeFactory function_factory(ast_value_factory());
FunctionState function_state(&function_state_, &scope_, scope, kind,
@@ -4007,7 +4053,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Calling a generator returns a generator object. That object is stored
// in a temporary variable, a definition that is used by "yield"
// expressions. This also marks the FunctionState as a generator.
- Variable* temp = scope_->DeclarationScope()->NewTemporary(
+ Variable* temp = scope_->NewTemporary(
ast_value_factory()->dot_generator_object_string());
function_state.set_generator_object_variable(temp);
}
@@ -4015,40 +4061,17 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Expect(Token::LPAREN, CHECK_OK);
int start_position = scanner()->location().beg_pos;
scope_->set_start_position(start_position);
- ParserFormalParameterParsingState parsing_state(scope);
- num_parameters =
- ParseFormalParameterList(&parsing_state, &formals_classifier, CHECK_OK);
+ ParserFormalParameters formals(scope);
+ ParseFormalParameterList(&formals, &formals_classifier, CHECK_OK);
+ arity = formals.Arity();
Expect(Token::RPAREN, CHECK_OK);
int formals_end_position = scanner()->location().end_pos;
- CheckArityRestrictions(num_parameters, arity_restriction,
- parsing_state.has_rest, start_position,
+ CheckArityRestrictions(arity, arity_restriction,
+ formals.has_rest, start_position,
formals_end_position, CHECK_OK);
Expect(Token::LBRACE, CHECK_OK);
- // If we have a named function expression, we add a local variable
- // declaration to the body of the function with the name of the
- // function and let it refer to the function itself (closure).
- // NOTE: We create a proxy and resolve it here so that in the
- // future we can change the AST to only refer to VariableProxies
- // instead of Variables and Proxis as is the case now.
- Variable* fvar = NULL;
- Token::Value fvar_init_op = Token::INIT_CONST_LEGACY;
- if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
- if (is_strict(language_mode)) {
- fvar_init_op = Token::INIT_CONST;
- }
- VariableMode fvar_mode = is_strict(language_mode) ? CONST : CONST_LEGACY;
- DCHECK(function_name != NULL);
- fvar = new (zone())
- Variable(scope_, function_name, fvar_mode, Variable::NORMAL,
- kCreatedInitialized, kNotAssigned);
- VariableProxy* proxy = factory()->NewVariableProxy(fvar);
- VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
- proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
- scope_->DeclareFunctionVar(fvar_declaration);
- }
-
// Determine if the function can be parsed lazily. Lazy parsing is different
// from lazy compilation; we need to parse more eagerly than we compile.
@@ -4112,8 +4135,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
}
if (!is_lazily_parsed) {
- body = ParseEagerFunctionBody(function_name, pos, parsing_state, fvar,
- fvar_init_op, kind, CHECK_OK);
+ body = ParseEagerFunctionBody(function_name, pos, formals, kind,
+ function_type, CHECK_OK);
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
}
@@ -4133,19 +4156,18 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Validate name and parameter names. We can do this only after parsing the
// function, since the function can declare itself strict.
- CheckFunctionName(language_mode, kind, function_name,
- name_is_strict_reserved, function_name_location,
- CHECK_OK);
- const bool use_strict_params =
- !parsing_state.is_simple_parameter_list || IsConciseMethod(kind);
+ CheckFunctionName(language_mode, function_name, function_name_validity,
+ function_name_location, CHECK_OK);
const bool allow_duplicate_parameters =
- is_sloppy(language_mode) && !use_strict_params;
+ is_sloppy(language_mode) && formals.is_simple && !IsConciseMethod(kind);
ValidateFormalParameters(&formals_classifier, language_mode,
allow_duplicate_parameters, CHECK_OK);
if (is_strict(language_mode)) {
CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
CHECK_OK);
+ }
+ if (is_strict(language_mode) || allow_harmony_sloppy()) {
CheckConflictingVarDeclarations(scope, CHECK_OK);
}
}
@@ -4158,7 +4180,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
function_name, ast_value_factory(), scope, body,
- materialized_literal_count, expected_property_count, num_parameters,
+ materialized_literal_count, expected_property_count, arity,
duplicate_parameters, function_type, FunctionLiteral::kIsFunction,
eager_compile_hint, kind, pos);
function_literal->set_function_token_position(function_token_pos);
@@ -4300,15 +4322,15 @@ Statement* Parser::BuildAssertIsCoercible(Variable* var) {
Block* Parser::BuildParameterInitializationBlock(
- const ParserFormalParameterParsingState& formal_parameters, bool* ok) {
+ const ParserFormalParameters& parameters, bool* ok) {
+ DCHECK(!parameters.is_simple);
DCHECK(scope_->is_function_scope());
- Block* init_block = nullptr;
- for (auto parameter : formal_parameters.params) {
- if (parameter.pattern == nullptr) continue;
- if (init_block == nullptr) {
- init_block = factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
- }
-
+ Block* init_block =
+ factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
+ for (int i = 0; i < parameters.params.length(); ++i) {
+ auto parameter = parameters.params[i];
+ // TODO(caitp,rossberg): Remove special handling for rest once desugared.
+ if (parameter.is_rest) break;
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::PARAMETER;
descriptor.parser = this;
@@ -4320,9 +4342,22 @@ Block* Parser::BuildParameterInitializationBlock(
descriptor.declaration_pos = parameter.pattern->position();
descriptor.initialization_pos = parameter.pattern->position();
descriptor.init_op = Token::INIT_LET;
+ Expression* initial_value =
+ factory()->NewVariableProxy(parameters.scope->parameter(i));
+ if (parameter.initializer != nullptr) {
+ // IS_UNDEFINED($param) ? initializer : $param
+ auto condition = factory()->NewCompareOperation(
+ Token::EQ_STRICT,
+ factory()->NewVariableProxy(parameters.scope->parameter(i)),
+ factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ initial_value = factory()->NewConditional(
+ condition, parameter.initializer, initial_value,
+ RelocInfo::kNoPosition);
+ descriptor.initialization_pos = parameter.initializer->position();
+ }
DeclarationParsingResult::Declaration decl(
- parameter.pattern, parameter.pattern->position(),
- factory()->NewVariableProxy(parameter.var));
+ parameter.pattern, parameter.pattern->position(), initial_value);
PatternRewriter::DeclareAndInitializeVariables(init_block, &descriptor,
&decl, nullptr, CHECK_OK);
}
@@ -4332,81 +4367,140 @@ Block* Parser::BuildParameterInitializationBlock(
ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
const AstRawString* function_name, int pos,
- const ParserFormalParameterParsingState& formal_parameters, Variable* fvar,
- Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
+ const ParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok) {
// Everything inside an eagerly parsed function will be parsed eagerly
// (see comment above).
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
- ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8, zone());
- if (fvar != NULL) {
- VariableProxy* fproxy = scope_->NewUnresolved(factory(), function_name);
- fproxy->BindTo(fvar);
- body->Add(factory()->NewExpressionStatement(
- factory()->NewAssignment(fvar_init_op,
- fproxy,
- factory()->NewThisFunction(pos),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition), zone());
- }
+ ZoneList<Statement*>* result = new(zone()) ZoneList<Statement*>(8, zone());
+ static const int kFunctionNameAssignmentIndex = 0;
+ if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
+ DCHECK(function_name != NULL);
+ // If we have a named function expression, we add a local variable
+ // declaration to the body of the function with the name of the
+ // function and let it refer to the function itself (closure).
+ // Not having parsed the function body, the language mode may still change,
+ // so we reserve a spot and create the actual const assignment later.
+ DCHECK_EQ(kFunctionNameAssignmentIndex, result->length());
+ result->Add(NULL, zone());
+ }
// For concise constructors, check that they are constructed,
// not called.
if (i::IsConstructor(kind)) {
- AddAssertIsConstruct(body, pos);
+ AddAssertIsConstruct(result, pos);
}
- auto init_block =
- BuildParameterInitializationBlock(formal_parameters, CHECK_OK);
- if (init_block != nullptr) {
- body->Add(init_block, zone());
+ ZoneList<Statement*>* body = result;
+ Scope* inner_scope = nullptr;
+ Block* inner_block = nullptr;
+ if (!parameters.is_simple) {
+ inner_scope = NewScope(scope_, BLOCK_SCOPE);
+ inner_scope->set_is_declaration_scope();
+ inner_scope->set_start_position(scanner()->location().beg_pos);
+ inner_block = factory()->NewBlock(NULL, 8, true, RelocInfo::kNoPosition);
+ inner_block->set_scope(inner_scope);
+ body = inner_block->statements();
}
- // For generators, allocate and yield an iterator on function entry.
- if (IsGeneratorFunction(kind)) {
- ZoneList<Expression*>* arguments =
- new(zone()) ZoneList<Expression*>(0, zone());
- CallRuntime* allocation = factory()->NewCallRuntime(
- ast_value_factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject), arguments,
- pos);
- VariableProxy* init_proxy = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
- Assignment* assignment = factory()->NewAssignment(
- Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition);
- VariableProxy* get_proxy = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
- Yield* yield = factory()->NewYield(
- get_proxy, assignment, Yield::kInitial, RelocInfo::kNoPosition);
- body->Add(factory()->NewExpressionStatement(
- yield, RelocInfo::kNoPosition), zone());
- }
+ {
+ BlockState block_state(&scope_, inner_scope ? inner_scope : scope_);
- ParseStatementList(body, Token::RBRACE, CHECK_OK);
+ // For generators, allocate and yield an iterator on function entry.
+ if (IsGeneratorFunction(kind)) {
+ ZoneList<Expression*>* arguments =
+ new(zone()) ZoneList<Expression*>(0, zone());
+ CallRuntime* allocation = factory()->NewCallRuntime(
+ ast_value_factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject), arguments,
+ pos);
+ VariableProxy* init_proxy = factory()->NewVariableProxy(
+ function_state_->generator_object_variable());
+ Assignment* assignment = factory()->NewAssignment(
+ Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition);
+ VariableProxy* get_proxy = factory()->NewVariableProxy(
+ function_state_->generator_object_variable());
+ Yield* yield = factory()->NewYield(
+ get_proxy, assignment, Yield::kInitial, RelocInfo::kNoPosition);
+ body->Add(factory()->NewExpressionStatement(
+ yield, RelocInfo::kNoPosition), zone());
+ }
- if (IsGeneratorFunction(kind)) {
- VariableProxy* get_proxy = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
- Expression* undefined =
- factory()->NewUndefinedLiteral(RelocInfo::kNoPosition);
- Yield* yield = factory()->NewYield(get_proxy, undefined, Yield::kFinal,
- RelocInfo::kNoPosition);
- body->Add(factory()->NewExpressionStatement(
- yield, RelocInfo::kNoPosition), zone());
- }
-
- if (IsSubclassConstructor(kind)) {
- body->Add(
- factory()->NewReturnStatement(
- this->ThisExpression(scope_, factory(), RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
- zone());
+ ParseStatementList(body, Token::RBRACE, CHECK_OK);
+
+ if (IsGeneratorFunction(kind)) {
+ VariableProxy* get_proxy = factory()->NewVariableProxy(
+ function_state_->generator_object_variable());
+ Expression* undefined =
+ factory()->NewUndefinedLiteral(RelocInfo::kNoPosition);
+ Yield* yield = factory()->NewYield(get_proxy, undefined, Yield::kFinal,
+ RelocInfo::kNoPosition);
+ body->Add(factory()->NewExpressionStatement(
+ yield, RelocInfo::kNoPosition), zone());
+ }
+
+ if (IsSubclassConstructor(kind)) {
+ body->Add(
+ factory()->NewReturnStatement(
+ this->ThisExpression(scope_, factory(), RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ zone());
+ }
}
Expect(Token::RBRACE, CHECK_OK);
scope_->set_end_position(scanner()->location().end_pos);
- return body;
+ if (!parameters.is_simple) {
+ DCHECK_NOT_NULL(inner_scope);
+ DCHECK_EQ(body, inner_block->statements());
+ scope_->SetLanguageMode(inner_scope->language_mode());
+ Block* init_block = BuildParameterInitializationBlock(parameters, CHECK_OK);
+ DCHECK_NOT_NULL(init_block);
+
+ inner_scope->set_end_position(scanner()->location().end_pos);
+ inner_scope = inner_scope->FinalizeBlockScope();
+ if (inner_scope != nullptr) {
+ CheckConflictingVarDeclarations(inner_scope, CHECK_OK);
+ }
+
+ result->Add(init_block, zone());
+ result->Add(inner_block, zone());
+ }
+
+ if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
+ // Now that we know the language mode, we can create the const assignment
+ // in the previously reserved spot.
+ // NOTE: We create a proxy and resolve it here so that in the
+ // future we can change the AST to only refer to VariableProxies
+ // instead of Variables and Proxies as is the case now.
+ Token::Value fvar_init_op = Token::INIT_CONST_LEGACY;
+ bool use_strict_const = is_strict(scope_->language_mode()) ||
+ (!allow_legacy_const() && allow_harmony_sloppy());
+ if (use_strict_const) {
+ fvar_init_op = Token::INIT_CONST;
+ }
+ VariableMode fvar_mode = use_strict_const ? CONST : CONST_LEGACY;
+ Variable* fvar = new (zone())
+ Variable(scope_, function_name, fvar_mode, Variable::NORMAL,
+ kCreatedInitialized, kNotAssigned);
+ VariableProxy* proxy = factory()->NewVariableProxy(fvar);
+ VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
+ proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
+ scope_->DeclareFunctionVar(fvar_declaration);
+
+ VariableProxy* fproxy = scope_->NewUnresolved(factory(), function_name);
+ fproxy->BindTo(fvar);
+ result->Set(kFunctionNameAssignmentIndex,
+ factory()->NewExpressionStatement(
+ factory()->NewAssignment(fvar_init_op, fproxy,
+ factory()->NewThisFunction(pos),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition));
+ }
+
+ return result;
}
@@ -4425,12 +4519,11 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
reusable_preparser_->set_allow_lazy(true);
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
- SET_ALLOW(harmony_modules);
SET_ALLOW(harmony_arrow_functions);
SET_ALLOW(harmony_sloppy);
- SET_ALLOW(harmony_unicode);
- SET_ALLOW(harmony_computed_property_names);
- SET_ALLOW(harmony_rest_params);
+ SET_ALLOW(harmony_sloppy_let);
+ SET_ALLOW(harmony_rest_parameters);
+ SET_ALLOW(harmony_default_parameters);
SET_ALLOW(harmony_spreadcalls);
SET_ALLOW(harmony_destructuring);
SET_ALLOW(harmony_spread_arrays);
@@ -4472,7 +4565,7 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
BlockState block_state(&scope_, block_scope);
scope_->SetLanguageMode(
- static_cast<LanguageMode>(scope_->language_mode() | STRICT_BIT));
+ static_cast<LanguageMode>(scope_->language_mode() | STRICT));
scope_->SetScopeName(name);
VariableProxy* proxy = NULL;
@@ -5634,7 +5727,7 @@ bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
bool Parser::ParseStatic(ParseInfo* info) {
Parser parser(info);
if (parser.Parse(info)) {
- info->set_language_mode(info->function()->language_mode());
+ info->set_language_mode(info->literal()->language_mode());
return true;
}
return false;
@@ -5642,7 +5735,7 @@ bool Parser::ParseStatic(ParseInfo* info) {
bool Parser::Parse(ParseInfo* info) {
- DCHECK(info->function() == NULL);
+ DCHECK(info->literal() == NULL);
FunctionLiteral* result = NULL;
// Ok to use Isolate here; this function is only called in the main thread.
DCHECK(parsing_on_main_thread_);
@@ -5677,7 +5770,7 @@ bool Parser::Parse(ParseInfo* info) {
void Parser::ParseOnBackground(ParseInfo* info) {
parsing_on_main_thread_ = false;
- DCHECK(info->function() == NULL);
+ DCHECK(info->literal() == NULL);
FunctionLiteral* result = NULL;
fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
@@ -5903,16 +5996,16 @@ Expression* Parser::SpreadCall(Expression* function,
int pos) {
if (function->IsSuperCallReference()) {
// Super calls
- // %_CallSuperWithSpread(%ReflectConstruct(<super>, args, new.target))
- args->InsertAt(0, function, zone());
+ // %ReflectConstruct(%GetPrototype(<this-function>), args, new.target))
+ ZoneList<Expression*>* tmp = new (zone()) ZoneList<Expression*>(1, zone());
+ tmp->Add(function->AsSuperCallReference()->this_function_var(), zone());
+ Expression* get_prototype = factory()->NewCallRuntime(
+ ast_value_factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kGetPrototype), tmp, pos);
+ args->InsertAt(0, get_prototype, zone());
args->Add(function->AsSuperCallReference()->new_target_var(), zone());
- Expression* result = factory()->NewCallRuntime(
- ast_value_factory()->reflect_construct_string(), NULL, args, pos);
- args = new (zone()) ZoneList<Expression*>(1, zone());
- args->Add(result, zone());
return factory()->NewCallRuntime(
- ast_value_factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kInlineCallSuperWithSpread), args, pos);
+ ast_value_factory()->reflect_construct_string(), NULL, args, pos);
} else {
if (function->IsProperty()) {
// Method calls
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 94aa28a1f9..a0be1dfe7e 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -104,9 +104,6 @@ class ParseInfo {
ast_value_factory_ = ast_value_factory;
}
- FunctionLiteral* function() { // TODO(titzer): temporary name adapter
- return literal_;
- }
FunctionLiteral* literal() { return literal_; }
void set_literal(FunctionLiteral* literal) { literal_ = literal; }
@@ -539,23 +536,24 @@ class Parser;
class SingletonLogger;
-struct ParserFormalParameterParsingState
- : public PreParserFormalParameterParsingState {
+struct ParserFormalParameters : FormalParametersBase {
struct Parameter {
- Parameter(Variable* var, Expression* pattern)
- : var(var), pattern(pattern) {}
- Variable* var;
+ Parameter(const AstRawString* name, Expression* pattern,
+ Expression* initializer, bool is_rest)
+ : name(name), pattern(pattern), initializer(initializer),
+ is_rest(is_rest) {}
+ const AstRawString* name;
Expression* pattern;
+ Expression* initializer;
+ bool is_rest;
};
- explicit ParserFormalParameterParsingState(Scope* scope)
- : PreParserFormalParameterParsingState(scope), params(4, scope->zone()) {}
-
+ explicit ParserFormalParameters(Scope* scope)
+ : FormalParametersBase(scope), params(4, scope->zone()) {}
ZoneList<Parameter> params;
- void AddParameter(Variable* var, Expression* pattern) {
- params.Add(Parameter(var, pattern), scope->zone());
- }
+ int Arity() const { return params.length(); }
+ const Parameter& at(int i) const { return params[i]; }
};
@@ -580,8 +578,8 @@ class ParserTraits {
typedef ObjectLiteral::Property* ObjectLiteralProperty;
typedef ZoneList<v8::internal::Expression*>* ExpressionList;
typedef ZoneList<ObjectLiteral::Property*>* PropertyList;
- typedef const v8::internal::AstRawString* FormalParameter;
- typedef ParserFormalParameterParsingState FormalParameterParsingState;
+ typedef ParserFormalParameters::Parameter FormalParameter;
+ typedef ParserFormalParameters FormalParameters;
typedef ZoneList<v8::internal::Statement*>* StatementList;
// For constructing objects returned by the traversing functions.
@@ -774,27 +772,34 @@ class ParserTraits {
}
V8_INLINE void AddParameterInitializationBlock(
- const ParserFormalParameterParsingState& formal_parameters,
+ const ParserFormalParameters& parameters,
ZoneList<v8::internal::Statement*>* body, bool* ok);
V8_INLINE Scope* NewScope(Scope* parent_scope, ScopeType scope_type,
FunctionKind kind = kNormalFunction);
+ V8_INLINE void AddFormalParameter(
+ ParserFormalParameters* parameters, Expression* pattern,
+ Expression* initializer, bool is_rest);
V8_INLINE void DeclareFormalParameter(
- ParserFormalParameterParsingState* parsing_state, Expression* name,
- ExpressionClassifier* classifier, bool is_rest);
+ Scope* scope, const ParserFormalParameters::Parameter& parameter,
+ bool is_simple, ExpressionClassifier* classifier);
void ParseArrowFunctionFormalParameters(
- ParserFormalParameterParsingState* scope, Expression* params,
- const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
- bool* ok);
+ ParserFormalParameters* parameters, Expression* params,
+ const Scanner::Location& params_loc,
+ Scanner::Location* duplicate_loc, bool* ok);
+ void ParseArrowFunctionFormalParameterList(
+ ParserFormalParameters* parameters, Expression* params,
+ const Scanner::Location& params_loc,
+ Scanner::Location* duplicate_loc, bool* ok);
- void ReindexLiterals(const ParserFormalParameterParsingState& parsing_state);
+ void ReindexLiterals(const ParserFormalParameters& parameters);
// Temporary glue; these functions will move to ParserBase.
Expression* ParseV8Intrinsic(bool* ok);
FunctionLiteral* ParseFunctionLiteral(
const AstRawString* name, Scanner::Location function_name_location,
- bool name_is_strict_reserved, FunctionKind kind,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok);
@@ -803,8 +808,8 @@ class ParserTraits {
Scanner::BookmarkScope* bookmark = nullptr);
V8_INLINE ZoneList<Statement*>* ParseEagerFunctionBody(
const AstRawString* name, int pos,
- const ParserFormalParameterParsingState& formal_parameters,
- Variable* fvar, Token::Value fvar_init_op, FunctionKind kind, bool* ok);
+ const ParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok);
ClassLiteral* ParseClassLiteral(const AstRawString* name,
Scanner::Location class_name_location,
@@ -1097,7 +1102,7 @@ class Parser : public ParserBase<ParserTraits> {
FunctionLiteral* ParseFunctionLiteral(
const AstRawString* name, Scanner::Location function_name_location,
- bool name_is_strict_reserved, FunctionKind kind,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok);
@@ -1156,13 +1161,13 @@ class Parser : public ParserBase<ParserTraits> {
SingletonLogger* logger, Scanner::BookmarkScope* bookmark = nullptr);
Block* BuildParameterInitializationBlock(
- const ParserFormalParameterParsingState& formal_parameters, bool* ok);
+ const ParserFormalParameters& parameters, bool* ok);
// Consumes the ending }.
ZoneList<Statement*>* ParseEagerFunctionBody(
const AstRawString* function_name, int pos,
- const ParserFormalParameterParsingState& formal_parameters,
- Variable* fvar, Token::Value fvar_init_op, FunctionKind kind, bool* ok);
+ const ParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok);
void ThrowPendingError(Isolate* isolate, Handle<Script> script);
@@ -1226,11 +1231,10 @@ void ParserTraits::SkipLazyFunctionBody(int* materialized_literal_count,
ZoneList<Statement*>* ParserTraits::ParseEagerFunctionBody(
- const AstRawString* name, int pos,
- const ParserFormalParameterParsingState& formal_parameters, Variable* fvar,
- Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
- return parser_->ParseEagerFunctionBody(name, pos, formal_parameters, fvar,
- fvar_init_op, kind, ok);
+ const AstRawString* name, int pos, const ParserFormalParameters& parameters,
+ FunctionKind kind, FunctionLiteral::FunctionType function_type, bool* ok) {
+ return parser_->ParseEagerFunctionBody(name, pos, parameters, kind,
+ function_type, ok);
}
void ParserTraits::CheckConflictingVarDeclarations(v8::internal::Scope* scope,
@@ -1309,40 +1313,55 @@ Expression* ParserTraits::SpreadCallNew(
}
-void ParserTraits::DeclareFormalParameter(
- ParserFormalParameterParsingState* parsing_state, Expression* pattern,
- ExpressionClassifier* classifier, bool is_rest) {
- bool is_duplicate = false;
- bool is_simple_name = pattern->IsVariableProxy();
- DCHECK(parser_->allow_harmony_destructuring() || is_simple_name);
-
- const AstRawString* name = is_simple_name
+void ParserTraits::AddFormalParameter(
+ ParserFormalParameters* parameters,
+ Expression* pattern, Expression* initializer, bool is_rest) {
+ bool is_simple = pattern->IsVariableProxy() && initializer == nullptr;
+ DCHECK(parser_->allow_harmony_destructuring() ||
+ parser_->allow_harmony_rest_parameters() ||
+ parser_->allow_harmony_default_parameters() || is_simple);
+ const AstRawString* name = is_simple
? pattern->AsVariableProxy()->raw_name()
: parser_->ast_value_factory()->empty_string();
+ parameters->params.Add(
+ ParserFormalParameters::Parameter(name, pattern, initializer, is_rest),
+ parameters->scope->zone());
+}
+
+
+void ParserTraits::DeclareFormalParameter(
+ Scope* scope, const ParserFormalParameters::Parameter& parameter,
+ bool is_simple, ExpressionClassifier* classifier) {
+ bool is_duplicate = false;
+ // TODO(caitp): Remove special handling for rest once desugaring is in.
+ auto name = is_simple || parameter.is_rest
+ ? parameter.name : parser_->ast_value_factory()->empty_string();
+ auto mode = is_simple || parameter.is_rest ? VAR : TEMPORARY;
Variable* var =
- parsing_state->scope->DeclareParameter(name, VAR, is_rest, &is_duplicate);
- parsing_state->AddParameter(var, is_simple_name ? nullptr : pattern);
- if (is_sloppy(parsing_state->scope->language_mode())) {
+ scope->DeclareParameter(name, mode, parameter.is_rest, &is_duplicate);
+ if (is_duplicate) {
+ classifier->RecordDuplicateFormalParameterError(
+ parser_->scanner()->location());
+ }
+ if (is_sloppy(scope->language_mode())) {
// TODO(sigurds) Mark every parameter as maybe assigned. This is a
// conservative approximation necessary to account for parameters
// that are assigned via the arguments array.
var->set_maybe_assigned();
}
- if (is_duplicate) {
- classifier->RecordDuplicateFormalParameterError(
- parser_->scanner()->location());
- }
}
void ParserTraits::AddParameterInitializationBlock(
- const ParserFormalParameterParsingState& formal_parameters,
+ const ParserFormalParameters& parameters,
ZoneList<v8::internal::Statement*>* body, bool* ok) {
- auto* init_block =
- parser_->BuildParameterInitializationBlock(formal_parameters, ok);
- if (!*ok) return;
- if (init_block != nullptr) {
- body->Add(init_block, parser_->zone());
+ if (!parameters.is_simple) {
+ auto* init_block =
+ parser_->BuildParameterInitializationBlock(parameters, ok);
+ if (!*ok) return;
+ if (init_block != nullptr) {
+ body->Add(init_block, parser_->zone());
+ }
}
}
} } // namespace v8::internal
diff --git a/deps/v8/src/pattern-rewriter.cc b/deps/v8/src/pattern-rewriter.cc
index 6969cf214e..10702d65ce 100644
--- a/deps/v8/src/pattern-rewriter.cc
+++ b/deps/v8/src/pattern-rewriter.cc
@@ -214,8 +214,8 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
Variable* Parser::PatternRewriter::CreateTempVar(Expression* value) {
- auto temp_scope = descriptor_->parser->scope_->DeclarationScope();
- auto temp = temp_scope->NewTemporary(ast_value_factory()->empty_string());
+ auto temp = descriptor_->parser->scope_->NewTemporary(
+ ast_value_factory()->empty_string());
if (value != nullptr) {
auto assignment = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(temp), value,
diff --git a/deps/v8/src/pending-compilation-error-handler.cc b/deps/v8/src/pending-compilation-error-handler.cc
index 10a10320a6..f1f9a20e58 100644
--- a/deps/v8/src/pending-compilation-error-handler.cc
+++ b/deps/v8/src/pending-compilation-error-handler.cc
@@ -4,7 +4,7 @@
#include "src/pending-compilation-error-handler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/handles.h"
#include "src/isolate.h"
#include "src/messages.h"
@@ -31,10 +31,10 @@ void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
Handle<Object> error;
switch (error_type_) {
case kReferenceError:
- error = factory->NewError("MakeReferenceError", message_, argument);
+ error = factory->NewReferenceError(message_, argument);
break;
case kSyntaxError:
- error = factory->NewError("MakeSyntaxError", message_, argument);
+ error = factory->NewSyntaxError(message_, argument);
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/ppc/OWNERS b/deps/v8/src/ppc/OWNERS
index a04d29a94f..eb007cb908 100644
--- a/deps/v8/src/ppc/OWNERS
+++ b/deps/v8/src/ppc/OWNERS
@@ -1,3 +1,4 @@
+jyan@ca.ibm.com
dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 0e759efec1..35968fc682 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -40,7 +40,7 @@
#include "src/ppc/assembler-ppc.h"
#include "src/assembler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
namespace v8 {
@@ -50,7 +50,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
-void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+void RelocInfo::apply(intptr_t delta) {
// absolute code pointer inside code object moves with the code object.
if (IsInternalReference(rmode_)) {
// Jump table entry
@@ -61,7 +61,7 @@ void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
DCHECK(IsInternalReferenceEncoded(rmode_));
Address target = Assembler::target_address_at(pc_, host_);
Assembler::set_target_address_at(pc_, host_, target + delta,
- icache_flush_mode);
+ SKIP_ICACHE_FLUSH);
}
}
@@ -146,11 +146,6 @@ void RelocInfo::set_target_address(Address target,
}
-Address Assembler::break_address_from_return_address(Address pc) {
- return target_address_from_return_address(pc);
-}
-
-
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
@@ -297,19 +292,14 @@ void RelocInfo::set_code_age_stub(Code* stub,
}
-Address RelocInfo::call_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- // The pc_ offset of 0 assumes patched return sequence per
- // BreakLocation::SetDebugBreakAtReturn(), or debug break
- // slot per BreakLocation::SetDebugBreakAtSlot().
+Address RelocInfo::debug_call_address() {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
return Assembler::target_address_at(pc_, host_);
}
-void RelocInfo::set_call_address(Address target) {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+void RelocInfo::set_debug_call_address(Address target) {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -319,21 +309,6 @@ void RelocInfo::set_call_address(Address target) {
}
-Object* RelocInfo::call_object() { return *call_object_address(); }
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -399,10 +374,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
- } else if (((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- isolate->debug()->has_break_points()) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
@@ -426,10 +399,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
@@ -465,8 +436,33 @@ void Assembler::CheckBuffer() {
}
}
+void Assembler::TrackBranch() {
+ DCHECK(!trampoline_emitted_);
+ int count = tracked_branch_count_++;
+ if (count == 0) {
+ // We leave space (kMaxBlockTrampolineSectionSize)
+ // for BlockTrampolinePoolScope buffer.
+ next_trampoline_check_ =
+ pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
+ } else {
+ next_trampoline_check_ -= kTrampolineSlotsSize;
+ }
+}
+
+void Assembler::UntrackBranch() {
+ DCHECK(!trampoline_emitted_);
+ DCHECK(tracked_branch_count_ > 0);
+ int count = --tracked_branch_count_;
+ if (count == 0) {
+ // Reset
+ next_trampoline_check_ = kMaxInt;
+ } else {
+ next_trampoline_check_ += kTrampolineSlotsSize;
+ }
+}
+
void Assembler::CheckTrampolinePoolQuick() {
- if (pc_offset() >= next_buffer_check_) {
+ if (pc_offset() >= next_trampoline_check_) {
CheckTrampolinePool();
}
}
@@ -597,12 +593,12 @@ void Assembler::PatchConstantPoolAccessInstruction(
ConstantPoolEntry::Type type) {
Address pc = buffer_ + pc_offset;
bool overflowed = (access == ConstantPoolEntry::OVERFLOWED);
+ CHECK(overflowed != is_int16(offset));
#ifdef DEBUG
ConstantPoolEntry::Access access_check =
static_cast<ConstantPoolEntry::Access>(-1);
DCHECK(IsConstantPoolLoadStart(pc, &access_check));
DCHECK(access_check == access);
- DCHECK(overflowed != is_int16(offset));
#endif
if (overflowed) {
int hi_word = static_cast<int>(offset >> 16);
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index b74a9f17cb..542968d8e7 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -34,8 +34,6 @@
// modified significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/base/bits.h"
@@ -212,16 +210,12 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
no_trampoline_pool_before_ = 0;
trampoline_pool_blocked_nesting_ = 0;
constant_pool_entry_sharing_blocked_nesting_ = 0;
- // We leave space (kMaxBlockTrampolineSectionSize)
- // for BlockTrampolinePoolScope buffer.
- next_buffer_check_ =
- FLAG_force_long_branches ? kMaxInt : kMaxCondBranchReach -
- kMaxBlockTrampolineSectionSize;
+ next_trampoline_check_ = kMaxInt;
internal_trampoline_exception_ = false;
last_bound_pos_ = 0;
optimizable_cmpi_pos_ = -1;
trampoline_emitted_ = FLAG_force_long_branches;
- unbound_labels_count_ = 0;
+ tracked_branch_count_ = 0;
ClearRecordedAstId();
relocations_.reserve(128);
}
@@ -427,14 +421,18 @@ int Assembler::target_at(int pos) {
}
-void Assembler::target_at_put(int pos, int target_pos) {
+void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
Instr instr = instr_at(pos);
int opcode = instr & kOpcodeMask;
+ if (is_branch != nullptr) {
+ *is_branch = (opcode == BX || opcode == BCX);
+ }
+
switch (opcode) {
case BX: {
int imm26 = target_pos - pos;
- DCHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
+ CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
if (imm26 == kInstrSize && !(instr & kLKMask)) {
// Branch to next instr without link.
instr = ORI; // nop: ori, 0,0,0
@@ -447,7 +445,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
}
case BCX: {
int imm16 = target_pos - pos;
- DCHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
+ CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
if (imm16 == kInstrSize && !(instr & kLKMask)) {
// Branch to next instr without link.
instr = ORI; // nop: ori, 0,0,0
@@ -528,11 +526,7 @@ int Assembler::max_reach_from(int pos) {
void Assembler::bind_to(Label* L, int pos) {
DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
int32_t trampoline_pos = kInvalidSlotPos;
- if (L->is_linked() && !trampoline_emitted_) {
- unbound_labels_count_--;
- next_buffer_check_ += kTrampolineSlotsSize;
- }
-
+ bool is_branch = false;
while (L->is_linked()) {
int fixup_pos = L->pos();
int32_t offset = pos - fixup_pos;
@@ -546,11 +540,15 @@ void Assembler::bind_to(Label* L, int pos) {
}
target_at_put(fixup_pos, trampoline_pos);
} else {
- target_at_put(fixup_pos, pos);
+ target_at_put(fixup_pos, pos, &is_branch);
}
}
L->bind_to(pos);
+ if (!trampoline_emitted_ && is_branch) {
+ UntrackBranch();
+ }
+
// Keep track of the last bound label so we don't eliminate any instructions
// before a bound label.
if (pos > last_bound_pos_) last_bound_pos_ = pos;
@@ -598,14 +596,14 @@ void Assembler::d_form(Instr instr, Register rt, Register ra,
if (!is_int16(val)) {
PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
}
- DCHECK(is_int16(val));
+ CHECK(is_int16(val));
} else {
if (!is_uint16(val)) {
PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
val, val, is_uint16(val), kImm16Mask);
}
- DCHECK(is_uint16(val));
+ CHECK(is_uint16(val));
}
emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}
@@ -673,10 +671,6 @@ int Assembler::link(Label* L) {
// should avoid most instances of branch offset overflow. See
// target_at() for where this is converted back to kEndOfChain.
position = pc_offset();
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
}
L->link_to(pc_offset());
}
@@ -688,35 +682,36 @@ int Assembler::link(Label* L) {
// Branch instructions.
-void Assembler::bclr(BOfield bo, LKBit lk) {
+void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
positions_recorder()->WriteRecordedPositions();
- emit(EXT1 | bo | BCLRX | lk);
+ emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
}
-void Assembler::bcctr(BOfield bo, LKBit lk) {
+void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
positions_recorder()->WriteRecordedPositions();
- emit(EXT1 | bo | BCCTRX | lk);
+ emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
}
// Pseudo op - branch to link register
-void Assembler::blr() { bclr(BA, LeaveLK); }
+void Assembler::blr() { bclr(BA, 0, LeaveLK); }
// Pseudo op - branch to count register -- used for "jump"
-void Assembler::bctr() { bcctr(BA, LeaveLK); }
+void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }
-void Assembler::bctrl() { bcctr(BA, SetLK); }
+void Assembler::bctrl() { bcctr(BA, 0, SetLK); }
void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
if (lk == SetLK) {
positions_recorder()->WriteRecordedPositions();
}
- DCHECK(is_int16(branch_offset));
- emit(BCX | bo | condition_bit * B16 | (kImm16Mask & branch_offset) | lk);
+ int imm16 = branch_offset;
+ CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
+ emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
}
@@ -724,10 +719,8 @@ void Assembler::b(int branch_offset, LKBit lk) {
if (lk == SetLK) {
positions_recorder()->WriteRecordedPositions();
}
- DCHECK((branch_offset & 3) == 0);
int imm26 = branch_offset;
- DCHECK(is_int26(imm26));
- // todo add AA and LK bits
+ CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
emit(BX | (imm26 & kImm26Mask) | lk);
}
@@ -1192,7 +1185,7 @@ void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
int offset = src.offset();
DCHECK(!src.ra_.is(r0));
- DCHECK(!(offset & 3) && is_int16(offset));
+ CHECK(!(offset & 3) && is_int16(offset));
offset = kImm16Mask & offset;
emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
#else
@@ -1326,7 +1319,7 @@ void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
void Assembler::ld(Register rd, const MemOperand& src) {
int offset = src.offset();
DCHECK(!src.ra_.is(r0));
- DCHECK(!(offset & 3) && is_int16(offset));
+ CHECK(!(offset & 3) && is_int16(offset));
offset = kImm16Mask & offset;
emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}
@@ -1343,7 +1336,7 @@ void Assembler::ldx(Register rd, const MemOperand& src) {
void Assembler::ldu(Register rd, const MemOperand& src) {
int offset = src.offset();
DCHECK(!src.ra_.is(r0));
- DCHECK(!(offset & 3) && is_int16(offset));
+ CHECK(!(offset & 3) && is_int16(offset));
offset = kImm16Mask & offset;
emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}
@@ -1360,7 +1353,7 @@ void Assembler::ldux(Register rd, const MemOperand& src) {
void Assembler::std(Register rs, const MemOperand& src) {
int offset = src.offset();
DCHECK(!src.ra_.is(r0));
- DCHECK(!(offset & 3) && is_int16(offset));
+ CHECK(!(offset & 3) && is_int16(offset));
offset = kImm16Mask & offset;
emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}
@@ -1377,7 +1370,7 @@ void Assembler::stdx(Register rs, const MemOperand& src) {
void Assembler::stdu(Register rs, const MemOperand& src) {
int offset = src.offset();
DCHECK(!src.ra_.is(r0));
- DCHECK(!(offset & 3) && is_int16(offset));
+ CHECK(!(offset & 3) && is_int16(offset));
offset = kImm16Mask & offset;
emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}
@@ -1928,7 +1921,7 @@ void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
DCHECK(!ra.is(r0));
- DCHECK(is_int16(offset));
+ CHECK(is_int16(offset));
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
@@ -1939,7 +1932,7 @@ void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
DCHECK(!ra.is(r0));
- DCHECK(is_int16(offset));
+ CHECK(is_int16(offset));
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
@@ -1967,7 +1960,7 @@ void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) {
void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
- DCHECK(is_int16(offset));
+ CHECK(is_int16(offset));
DCHECK(!ra.is(r0));
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
@@ -1978,7 +1971,7 @@ void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
- DCHECK(is_int16(offset));
+ CHECK(is_int16(offset));
DCHECK(!ra.is(r0));
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
@@ -2007,7 +2000,7 @@ void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) {
void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
- DCHECK(is_int16(offset));
+ CHECK(is_int16(offset));
DCHECK(!ra.is(r0));
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
@@ -2018,7 +2011,7 @@ void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
- DCHECK(is_int16(offset));
+ CHECK(is_int16(offset));
DCHECK(!ra.is(r0));
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
@@ -2047,7 +2040,7 @@ void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) {
void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
- DCHECK(is_int16(offset));
+ CHECK(is_int16(offset));
DCHECK(!ra.is(r0));
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
@@ -2058,7 +2051,7 @@ void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
- DCHECK(is_int16(offset));
+ CHECK(is_int16(offset));
DCHECK(!ra.is(r0));
int imm16 = offset & kImm16Mask;
// could be x_form instruction with some casting magic
@@ -2407,46 +2400,29 @@ void Assembler::CheckTrampolinePool() {
// either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
// which are both checked here. Also, recursive calls to CheckTrampolinePool
// are blocked by trampoline_pool_blocked_nesting_.
- if ((trampoline_pool_blocked_nesting_ > 0) ||
- (pc_offset() < no_trampoline_pool_before_)) {
- // Emission is currently blocked; make sure we try again as soon as
- // possible.
- if (trampoline_pool_blocked_nesting_ > 0) {
- next_buffer_check_ = pc_offset() + kInstrSize;
- } else {
- next_buffer_check_ = no_trampoline_pool_before_;
- }
+ if (trampoline_pool_blocked_nesting_ > 0) return;
+ if (pc_offset() < no_trampoline_pool_before_) {
+ next_trampoline_check_ = no_trampoline_pool_before_;
return;
}
DCHECK(!trampoline_emitted_);
- DCHECK(unbound_labels_count_ >= 0);
- if (unbound_labels_count_ > 0) {
- // First we emit jump, then we emit trampoline pool.
- {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- Label after_pool;
- b(&after_pool);
-
- int pool_start = pc_offset();
- for (int i = 0; i < unbound_labels_count_; i++) {
- b(&after_pool);
- }
- bind(&after_pool);
- trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+ if (tracked_branch_count_ > 0) {
+ int size = tracked_branch_count_ * kInstrSize;
- trampoline_emitted_ = true;
- // As we are only going to emit trampoline once, we need to prevent any
- // further emission.
- next_buffer_check_ = kMaxInt;
+ // As we are only going to emit trampoline once, we need to prevent any
+ // further emission.
+ trampoline_emitted_ = true;
+ next_trampoline_check_ = kMaxInt;
+
+ // First we emit jump, then we emit trampoline pool.
+ b(size + kInstrSize, LeaveLK);
+ for (int i = size; i > 0; i -= kInstrSize) {
+ b(i, LeaveLK);
}
- } else {
- // Number of branches to unbound label at this point is zero, so we can
- // move next buffer check to maximum.
- next_buffer_check_ =
- pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
+
+ trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
}
- return;
}
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 82d068503d..a1c08ad0ea 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -591,10 +591,11 @@ class Assembler : public AssemblerBase {
// Returns the branch offset to the given label from the current code position
// Links the label to the current position if it is still unbound
- // Manages the jump elimination optimization if the second parameter is true.
- int branch_offset(Label* L, bool jump_elimination_allowed) {
- int position = link(L);
- return position - pc_offset();
+ int branch_offset(Label* L) {
+ if (L->is_unused() && !trampoline_emitted_) {
+ TrackBranch();
+ }
+ return link(L) - pc_offset();
}
// Puts a labels target address at the given position.
@@ -642,9 +643,6 @@ class Assembler : public AssemblerBase {
// in the instruction stream that the call will return to.
INLINE(static Address return_address_from_call_start(Address pc));
- // Return the code target address of the patch debug break slot
- INLINE(static Address break_address_from_return_address(Address pc));
-
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -695,14 +693,6 @@ class Assembler : public AssemblerBase {
static const int kCallTargetAddressOffset =
(kMovInstructions + 2) * kInstrSize;
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- // Patched return sequence is a FIXED_SEQUENCE:
- // mov r0, <address>
- // mtlr r0
- // blrl
- static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
-
// Distance between start of patched debug break slot and the emitted address
// to jump to.
// Patched debug break slot code is a FIXED_SEQUENCE:
@@ -711,13 +701,6 @@ class Assembler : public AssemblerBase {
// blrl
static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
- // This is the length of the BreakLocation::SetDebugBreakAtReturn()
- // code patch FIXED_SEQUENCE
- static const int kJSReturnSequenceInstructions =
- kMovInstructionsNoConstantPool + 3;
- static const int kJSReturnSequenceLength =
- kJSReturnSequenceInstructions * kInstrSize;
-
// This is the length of the code sequence from SetDebugBreakAtSlot()
// FIXED_SEQUENCE
static const int kDebugBreakSlotInstructions =
@@ -743,17 +726,17 @@ class Assembler : public AssemblerBase {
void CodeTargetAlign();
// Branch instructions
- void bclr(BOfield bo, LKBit lk);
+ void bclr(BOfield bo, int condition_bit, LKBit lk);
void blr();
void bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk = LeaveLK);
void b(int branch_offset, LKBit lk);
- void bcctr(BOfield bo, LKBit lk);
+ void bcctr(BOfield bo, int condition_bit, LKBit lk);
void bctr();
void bctrl();
// Convenience branch instructions using labels
- void b(Label* L, LKBit lk = LeaveLK) { b(branch_offset(L, false), lk); }
+ void b(Label* L, LKBit lk = LeaveLK) { b(branch_offset(L), lk); }
inline CRegister cmpi_optimization(CRegister cr) {
// Check whether the branch is preceeded by an optimizable cmpi against 0.
@@ -798,7 +781,7 @@ class Assembler : public AssemblerBase {
cr = cmpi_optimization(cr);
- int b_offset = branch_offset(L, false);
+ int b_offset = branch_offset(L);
switch (cond) {
case eq:
@@ -836,6 +819,48 @@ class Assembler : public AssemblerBase {
}
}
+ void bclr(Condition cond, CRegister cr = cr7, LKBit lk = LeaveLK) {
+ DCHECK(cond != al);
+ DCHECK(cr.code() >= 0 && cr.code() <= 7);
+
+ cr = cmpi_optimization(cr);
+
+ switch (cond) {
+ case eq:
+ bclr(BT, encode_crbit(cr, CR_EQ), lk);
+ break;
+ case ne:
+ bclr(BF, encode_crbit(cr, CR_EQ), lk);
+ break;
+ case gt:
+ bclr(BT, encode_crbit(cr, CR_GT), lk);
+ break;
+ case le:
+ bclr(BF, encode_crbit(cr, CR_GT), lk);
+ break;
+ case lt:
+ bclr(BT, encode_crbit(cr, CR_LT), lk);
+ break;
+ case ge:
+ bclr(BF, encode_crbit(cr, CR_LT), lk);
+ break;
+ case unordered:
+ bclr(BT, encode_crbit(cr, CR_FU), lk);
+ break;
+ case ordered:
+ bclr(BF, encode_crbit(cr, CR_FU), lk);
+ break;
+ case overflow:
+ bclr(BT, encode_crbit(cr, CR_SO), lk);
+ break;
+ case nooverflow:
+ bclr(BF, encode_crbit(cr, CR_SO), lk);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+
void isel(Register rt, Register ra, Register rb, int cb);
void isel(Condition cond, Register rt, Register ra, Register rb,
CRegister cr = cr7) {
@@ -931,7 +956,7 @@ class Assembler : public AssemblerBase {
// Decrement CTR; branch if CTR != 0
void bdnz(Label* L, LKBit lk = LeaveLK) {
- bc(branch_offset(L, false), DCBNZ, 0, lk);
+ bc(branch_offset(L), DCBNZ, 0, lk);
}
// Data-processing instructions
@@ -1296,11 +1321,11 @@ class Assembler : public AssemblerBase {
// Debugging
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
+ // Mark generator continuation.
+ void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot();
+ void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
@@ -1419,11 +1444,12 @@ class Assembler : public AssemblerBase {
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
- // Decode branch instruction at pos and return branch target pos
+ // Decode instruction(s) at pos and return backchain to previous
+ // label reference or kEndOfChain.
int target_at(int pos);
- // Patch branch instruction at pos to branch to given branch target pos
- void target_at_put(int pos, int target_pos);
+ // Patch instruction(s) at pos to target target_pos (e.g. branch)
+ void target_at_put(int pos, int target_pos, bool* is_branch = nullptr);
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
@@ -1475,7 +1501,7 @@ class Assembler : public AssemblerBase {
// Repeated checking whether the trampoline pool should be emitted is rather
// expensive. By default we only check again once a number of instructions
// has been generated.
- int next_buffer_check_; // pc offset of next buffer check.
+ int next_trampoline_check_; // pc offset of next buffer check.
// Emission of the trampoline pool may be blocked in some code sequences.
int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
@@ -1502,6 +1528,8 @@ class Assembler : public AssemblerBase {
inline void CheckBuffer();
void GrowBuffer(int needed = 0);
inline void emit(Instr x);
+ inline void TrackBranch();
+ inline void UntrackBranch();
inline void CheckTrampolinePoolQuick();
// Instruction generation
@@ -1555,7 +1583,7 @@ class Assembler : public AssemblerBase {
};
int32_t get_trampoline_entry();
- int unbound_labels_count_;
+ int tracked_branch_count_;
// If trampoline is emitted, generated code is becoming large. As
// this is already a slow case which can possibly break our code
// generation for the extreme case, we use this information to
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc
index a588eb298d..6ecfcea2f6 100644
--- a/deps/v8/src/ppc/builtins-ppc.cc
+++ b/deps/v8/src/ppc/builtins-ppc.cc
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/codegen.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -307,37 +305,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
-static void Generate_Runtime_NewObject(MacroAssembler* masm,
- bool create_memento,
- Register original_constructor,
- Label* count_incremented,
- Label* allocated) {
- // ----------- S t a t e -------------
- // -- r4: argument for Runtime_NewObject
- // -----------------------------------
- Register result = r7;
-
- if (create_memento) {
- // Get the cell or allocation site.
- __ LoadP(r5, MemOperand(sp, 2 * kPointerSize));
- __ Push(r5, r4, original_constructor);
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- __ mr(result, r3);
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- __ b(count_incremented);
- } else {
- __ Push(r4, original_constructor);
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mr(result, r3);
- __ b(allocated);
- }
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -357,32 +326,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
- if (create_memento) {
- __ AssertUndefinedOrAllocationSite(r5, r7);
- __ push(r5);
- }
-
// Preserve the incoming parameters on the stack.
+ __ AssertUndefinedOrAllocationSite(r5, r7);
__ SmiTag(r3);
- if (use_new_target) {
- __ Push(r3, r4, r6);
- } else {
- __ Push(r3, r4);
- }
-
- Label rt_call, allocated, normal_new, count_incremented;
- __ cmp(r4, r6);
- __ beq(&normal_new);
-
- // Original constructor and function are different.
- Generate_Runtime_NewObject(masm, create_memento, r6, &count_incremented,
- &allocated);
- __ bind(&normal_new);
+ __ Push(r5, r3, r4, r6);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
if (FLAG_inline_new) {
- Label undo_allocation;
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate);
__ mov(r5, Operand(debug_step_in_fp));
@@ -390,12 +342,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ cmpi(r5, Operand::Zero());
__ bne(&rt_call);
+ // Fall back to runtime if the original constructor and function differ.
+ __ cmp(r4, r6);
+ __ bne(&rt_call);
+
// Load the initial map and verify that it is in fact a map.
// r4: constructor function
__ LoadP(r5,
FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(r5, &rt_call);
- __ CompareObjectType(r5, r6, r7, MAP_TYPE);
+ __ CompareObjectType(r5, r8, r7, MAP_TYPE);
__ bne(&rt_call);
// Check that the constructor is not constructing a JSFunction (see
@@ -403,7 +359,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map's instance type would be JS_FUNCTION_TYPE.
// r4: constructor function
// r5: initial map
- __ CompareInstanceType(r5, r6, JS_FUNCTION_TYPE);
+ __ CompareInstanceType(r5, r8, JS_FUNCTION_TYPE);
__ beq(&rt_call);
if (!is_api_function) {
@@ -433,12 +389,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Now allocate the JSObject on the heap.
// r4: constructor function
// r5: initial map
+ Label rt_call_reload_new_target;
__ lbz(r6, FieldMemOperand(r5, Map::kInstanceSizeOffset));
if (create_memento) {
__ addi(r6, r6, Operand(AllocationMemento::kSize / kPointerSize));
}
- __ Allocate(r6, r7, r8, r9, &rt_call, SIZE_IN_WORDS);
+ __ Allocate(r6, r7, r8, r9, &rt_call_reload_new_target, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
@@ -474,7 +431,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ blt(&no_inobject_slack_tracking);
// Allocate object with a slack.
- __ lbz(r3, FieldMemOperand(r5, Map::kPreAllocatedPropertyFieldsOffset));
+ __ lbz(
+ r3,
+ FieldMemOperand(
+ r5, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
+ __ lbz(r5, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
+ __ sub(r3, r3, r5);
if (FLAG_debug_code) {
__ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
__ add(r0, r8, r0);
@@ -505,7 +467,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ LoadRoot(r10, Heap::kAllocationMementoMapRootIndex);
__ StoreP(r10, MemOperand(r8, AllocationMemento::kMapOffset));
// Load the AllocationSite
- __ LoadP(r10, MemOperand(sp, 2 * kPointerSize));
+ __ LoadP(r10, MemOperand(sp, 3 * kPointerSize));
+ __ AssertUndefinedOrAllocationSite(r10, r3);
__ StoreP(r10,
MemOperand(r8, AllocationMemento::kAllocationSiteOffset));
__ addi(r8, r8, Operand(AllocationMemento::kAllocationSiteOffset +
@@ -515,109 +478,46 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
+ // and jump into the continuation code at any time from now on.
__ addi(r7, r7, Operand(kHeapObjectTag));
- // Check if a non-empty properties array is needed. Continue with
- // allocated object if not; allocate and initialize a FixedArray if yes.
- // r4: constructor function
- // r7: JSObject
- // r8: start of next object (not tagged)
- __ lbz(r6, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields
- // and in-object properties.
- __ lbz(r0, FieldMemOperand(r5, Map::kPreAllocatedPropertyFieldsOffset));
- __ add(r6, r6, r0);
- __ lbz(r0, FieldMemOperand(r5, Map::kInObjectPropertiesOffset));
- __ sub(r6, r6, r0, LeaveOE, SetRC);
-
- // Done if no extra properties are to be allocated.
- __ beq(&allocated, cr0);
- __ Assert(ge, kPropertyAllocationCountFailed, cr0);
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // r4: constructor
- // r6: number of elements in properties array
- // r7: JSObject
- // r8: start of next object
- __ addi(r3, r6, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ Allocate(
- r3, r8, r9, r5, &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // r4: constructor
- // r6: number of elements in properties array
- // r7: JSObject
- // r8: FixedArray (not tagged)
- __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
- __ mr(r5, r8);
- DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ StoreP(r9, MemOperand(r5));
- DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ SmiTag(r3, r6);
- __ StoreP(r3, MemOperand(r5, kPointerSize));
- __ addi(r5, r5, Operand(2 * kPointerSize));
-
- // Initialize the fields to undefined.
- // r4: constructor function
- // r5: First element of FixedArray (not tagged)
- // r6: number of elements in properties array
- // r7: JSObject
- // r8: FixedArray (not tagged)
- DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- {
- Label done;
- __ cmpi(r6, Operand::Zero());
- __ beq(&done);
- if (!is_api_function || create_memento) {
- __ LoadRoot(r10, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
- __ cmp(r10, r11);
- __ Assert(eq, kUndefinedValueNotLoaded);
- }
- __ InitializeNFieldsWithFiller(r5, r6, r10);
- __ bind(&done);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // r4: constructor function
- // r7: JSObject
- // r8: FixedArray (not tagged)
- __ addi(r8, r8, Operand(kHeapObjectTag)); // Add the heap tag.
- __ StoreP(r8, FieldMemOperand(r7, JSObject::kPropertiesOffset), r0);
-
// Continue with JSObject being successfully allocated
- // r4: constructor function
// r7: JSObject
__ b(&allocated);
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // r7: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(r7, r8);
+ // Reload the original constructor and fall-through.
+ __ bind(&rt_call_reload_new_target);
+ __ LoadP(r6, MemOperand(sp, 0 * kPointerSize));
}
// Allocate the new receiver object using the runtime call.
// r4: constructor function
+ // r6: original constructor
__ bind(&rt_call);
- Generate_Runtime_NewObject(masm, create_memento, r4, &count_incremented,
- &allocated);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ LoadP(r5, MemOperand(sp, 3 * kPointerSize));
+ __ Push(r5, r4, r6);
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ Push(r4, r6);
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ mr(r7, r3);
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ b(&count_incremented);
+ }
// Receiver for constructor call allocated.
// r7: JSObject
__ bind(&allocated);
if (create_memento) {
- int offset = (use_new_target ? 3 : 2) * kPointerSize;
- __ LoadP(r5, MemOperand(sp, offset));
+ __ LoadP(r5, MemOperand(sp, 3 * kPointerSize));
__ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
__ cmp(r5, r8);
__ beq(&count_incremented);
@@ -633,22 +533,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore the parameters.
- if (use_new_target) {
- __ Pop(r4, ip);
- } else {
- __ pop(r4);
- }
+ __ Pop(r4, ip);
// Retrieve smi-tagged arguments count from the stack.
__ LoadP(r6, MemOperand(sp));
// Push new.target onto the construct frame. This is stored just below the
// receiver on the stack.
- if (use_new_target) {
- __ Push(ip, r7, r7);
- } else {
- __ Push(r7, r7);
- }
+ __ Push(ip, r7, r7);
// Set up pointer to last argument.
__ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -659,8 +551,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r6: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target (if used)
- // sp[2/3]: number of arguments (smi-tagged)
+ // sp[2]: new.target
+ // sp[3]: number of arguments (smi-tagged)
Label loop, no_args;
__ SmiUntag(r3, r6, SetRC);
__ beq(&no_args, cr0);
@@ -687,17 +579,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- // TODO(arv): Remove the "!use_new_target" before supporting optimization
- // of functions that reference new.target
- if (!is_api_function && !use_new_target) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r3: result
// sp[0]: receiver
- // sp[1]: new.target (if used)
- // sp[1/2]: number of arguments (smi-tagged)
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@@ -707,9 +597,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// r3: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (if used)
- // sp[1/2]: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
__ JumpIfSmi(r3, &use_receiver);
// If the type of the result (stored in its map) is less than
@@ -727,10 +617,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&exit);
// r3: result
// sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (if used)
- // sp[1/2]: number of arguments (smi-tagged)
- int offset = (use_new_target ? 2 : 1) * kPointerSize;
- __ LoadP(r4, MemOperand(sp, offset));
+ // sp[1]: new.target (original constructor)
+ // sp[2]: number of arguments (smi-tagged)
+ __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
// Leave construct frame.
}
@@ -744,17 +633,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, true, false);
}
@@ -768,12 +652,11 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- // TODO(dslomov): support pretenuring
- CHECK(!FLAG_pretenuring_call_new);
-
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+ __ AssertUndefinedOrAllocationSite(r5, r7);
+
// Smi-tagged arguments count.
__ mr(r7, r3);
__ SmiTag(r7, SetRC);
@@ -781,8 +664,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// receiver is the hole.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- // smi arguments count, new.target, receiver
- __ Push(r7, r6, ip);
+ // allocation site, smi arguments count, new.target, receiver
+ __ Push(r5, r7, r6, ip);
// Set up pointer to last argument.
__ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -973,6 +856,147 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o r4: the JS function object being called.
+// o cp: our context
+// o pp: the caller's constant pool pointer (if enabled)
+// o fp: the caller's frame pointer
+// o sp: stack pointer
+// o lr: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-ppc.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushFixedFrame(r4);
+ __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+ // Get the bytecode array from the function object and load the pointer to the
+ // first entry into kInterpreterBytecodeRegister.
+ __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size (word) from the BytecodeArray object.
+ __ lwz(r5, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ sub(r6, sp, r5);
+ __ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
+ __ cmpl(r6, r0);
+ __ bge(&ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ Label loop, no_args;
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ ShiftRightImm(r5, r5, Operand(kPointerSizeLog2), SetRC);
+ __ beq(&no_args, cr0);
+ __ mtctr(r5);
+ __ bind(&loop);
+ __ push(r6);
+ __ bdnz(&loop);
+ __ bind(&no_args);
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ __ LoadRoot(r0, Heap::kStackLimitRootIndex);
+ __ cmp(sp, r0);
+ __ bge(&ok);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ bind(&ok);
+ }
+
+ // Load accumulator, register file, bytecode offset, dispatch table into
+ // registers.
+ __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ subi(
+ kInterpreterRegisterFileRegister, fp,
+ Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ addi(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Dispatch to the first bytecode handler for the function.
+ __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
+ __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
+ // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
+ // and header removal.
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(ip);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // The return value is in accumulator, which is already in r3.
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Drop receiver + arguments.
+ __ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ blr();
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -1318,8 +1342,10 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Enter an internal frame in order to preserve argument count.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r3);
- __ Push(r3, r5);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ Push(r3);
+ __ mr(r3, r5);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mr(r5, r3);
__ pop(r3);
@@ -1440,6 +1466,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int vectorOffset,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
@@ -1456,12 +1483,9 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ LoadP(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
- FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
- Handle<TypeFeedbackVector> feedback_vector =
- masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
- int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
- __ LoadSmiLiteral(slot, Smi::FromInt(index));
- __ Move(vector, feedback_vector);
+ int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
+ __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
+ __ LoadP(vector, MemOperand(fp, vectorOffset));
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1496,6 +1520,14 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
const int kReceiverOffset = kArgumentsOffset + kPointerSize;
const int kFunctionOffset = kReceiverOffset + kPointerSize;
+ const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r4,
+ FieldMemOperand(r4, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ push(r4);
__ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r3);
@@ -1510,10 +1542,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
Generate_CheckStackOverflow(masm, kFunctionOffset, r3, kArgcIsSmiTagged);
// Push current limit and index.
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ li(r4, Operand::Zero());
__ Push(r3, r4); // limit and initial index.
@@ -1572,8 +1602,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
// Convert the receiver to a regular object.
// r3: receiver
__ bind(&call_to_object);
- __ push(r3);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ b(&push_receiver);
__ bind(&use_global_proxy);
@@ -1586,8 +1616,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ push(r3);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kArgumentsOffset, kIndexOffset,
- kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
@@ -1625,6 +1655,14 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+ static const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r4,
+ FieldMemOperand(r4, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ push(r4);
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
@@ -1647,32 +1685,27 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
Generate_CheckStackOverflow(masm, kFunctionOffset, r3, kArgcIsSmiTagged);
// Push current limit and index.
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
+ const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ li(r4, Operand::Zero());
__ Push(r3, r4); // limit and initial index.
- // Push newTarget and callee functions
- __ LoadP(r3, MemOperand(fp, kNewTargetOffset));
- __ push(r3);
+ // Push the constructor function as callee
__ LoadP(r3, MemOperand(fp, kFunctionOffset));
__ push(r3);
// Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kArgumentsOffset, kIndexOffset,
- kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Use undefined feedback vector
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ LoadP(r4, MemOperand(fp, kFunctionOffset));
+ __ LoadP(r7, MemOperand(fp, kNewTargetOffset));
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
// Leave internal frame.
}
__ addi(sp, sp, Operand(kStackSize * kPointerSize));
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index cd7d30b1c6..435ac47c00 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/base/bits.h"
@@ -14,8 +12,8 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -33,7 +31,7 @@ static void InitializeArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -49,7 +47,7 @@ static void InitializeInternalArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -266,6 +264,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpi(r7, Operand(SYMBOL_TYPE));
__ beq(slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
+ __ beq(slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
@@ -284,6 +285,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpi(r7, Operand(SYMBOL_TYPE));
__ beq(slow);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
+ __ beq(slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics,
// since we need to throw a TypeError. Smis and heap numbers have
@@ -355,11 +359,8 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ li(r4, Operand((cond == le) ? GREATER : LESS));
__ isel(eq, r3, r3, r4);
} else {
- Label not_equal;
- __ bne(&not_equal);
// All-zero means Infinity means equal.
- __ Ret();
- __ bind(&not_equal);
+ __ Ret(eq);
if (cond == le) {
__ li(r3, Operand(GREATER)); // NaN <= NaN should fail.
} else {
@@ -390,13 +391,15 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
// If rhs is not a number and lhs is a Smi then strict equality cannot
// succeed. Return non-equal
// If rhs is r3 then there is already a non zero value in it.
- Label skip;
- __ beq(&skip);
if (!rhs.is(r3)) {
+ Label skip;
+ __ beq(&skip);
__ mov(r3, Operand(NOT_EQUAL));
+ __ Ret();
+ __ bind(&skip);
+ } else {
+ __ Ret(ne);
}
- __ Ret();
- __ bind(&skip);
} else {
// Smi compared non-strictly with a non-Smi non-heap-number. Call
// the runtime.
@@ -420,13 +423,15 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
// If lhs is not a number and rhs is a smi then strict equality cannot
// succeed. Return non-equal.
// If lhs is r3 then there is already a non zero value in it.
- Label skip;
- __ beq(&skip);
if (!lhs.is(r3)) {
+ Label skip;
+ __ beq(&skip);
__ mov(r3, Operand(NOT_EQUAL));
+ __ Ret();
+ __ bind(&skip);
+ } else {
+ __ Ret(ne);
}
- __ Ret();
- __ bind(&skip);
} else {
// Smi compared non-strictly with a non-smi non-heap-number. Call
// the runtime.
@@ -701,26 +706,30 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- if (cc == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq && strict()) {
+ __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc == lt || cc == le) {
- ncr = GREATER;
+ Builtins::JavaScript native;
+ if (cc == eq) {
+ native = Builtins::EQUALS;
} else {
- DCHECK(cc == gt || cc == ge); // remaining cases
- ncr = LESS;
+ native =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
+ } else {
+ DCHECK(cc == gt || cc == ge); // remaining cases
+ ncr = LESS;
+ }
+ __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
+ __ push(r3);
}
- __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
- __ push(r3);
- }
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -734,7 +743,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ mflr(r0);
__ MultiPush(kJSCallerSaved | r0.bit());
if (save_doubles()) {
- __ SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
+ __ MultiPushDoubles(kCallerSavedDoubles);
}
const int argument_count = 1;
const int fp_argument_count = 0;
@@ -746,7 +755,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
if (save_doubles()) {
- __ RestoreFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
+ __ MultiPopDoubles(kCallerSavedDoubles);
}
__ MultiPop(kJSCallerSaved | r0.bit());
__ mtlr(r0);
@@ -1219,11 +1228,10 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Save callee saved registers on the stack.
__ MultiPush(kCalleeSaved);
- // Floating point regs FPR0 - FRP13 are volatile
- // FPR14-FPR31 are non-volatile, but sub-calls will save them for us
-
- // int offset_to_argv = kPointerSize * 22; // matches (22*4) above
- // __ lwz(r7, MemOperand(sp, offset_to_argv));
+ // Save callee-saved double registers.
+ __ MultiPushDoubles(kCalleeSavedDoubles);
+ // Set up the reserved register for 0.0.
+ __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);
// Push a frame with special values setup to mark it as an entry frame.
// r3: code entry
@@ -1345,20 +1353,16 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Reset the stack to the callee saved registers.
__ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-// Restore callee-saved registers and return.
-#ifdef DEBUG
- if (FLAG_debug_code) {
- Label here;
- __ b(&here, SetLK);
- __ bind(&here);
- }
-#endif
+ // Restore callee-saved double registers.
+ __ MultiPopDoubles(kCalleeSavedDoubles);
+ // Restore callee-saved registers.
__ MultiPop(kCalleeSaved);
+ // Return
__ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
- __ mtctr(r0);
- __ bctr();
+ __ mtlr(r0);
+ __ blr();
}
@@ -1676,7 +1680,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(r4);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments, 1, 1);
}
@@ -1967,10 +1971,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -2541,32 +2542,43 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
+ bool is_super) {
// r3 : number of arguments to the construct function
- // r5 : Feedback vector
- // r6 : slot in feedback vector (Smi)
// r4 : the function to call
+ // r5 : feedback vector
+ // r6 : slot in feedback vector (Smi)
+ // r7 : original constructor (for IsSuperConstructorCall)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r3);
- __ Push(r6, r5, r4, r3);
+ if (is_super) {
+ __ Push(r6, r5, r4, r3, r7);
+ } else {
+ __ Push(r6, r5, r4, r3);
+ }
__ CallStub(stub);
- __ Pop(r6, r5, r4, r3);
+ if (is_super) {
+ __ Pop(r6, r5, r4, r3, r7);
+ } else {
+ __ Pop(r6, r5, r4, r3);
+ }
__ SmiUntag(r3);
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r3 : number of arguments to the construct function
// r4 : the function to call
- // r5 : Feedback vector
+ // r5 : feedback vector
// r6 : slot in feedback vector (Smi)
+ // r7 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2574,24 +2586,24 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into r7.
- __ SmiToPtrArrayOffset(r7, r6);
- __ add(r7, r5, r7);
- __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
+ // Load the cache state into r8.
+ __ SmiToPtrArrayOffset(r8, r6);
+ __ add(r8, r5, r8);
+ __ LoadP(r8, FieldMemOperand(r8, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- // We don't know if r7 is a WeakCell or a Symbol, but it's harmless to read at
+ // We don't know if r8 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in type-feedback-vector.h).
Label check_allocation_site;
- Register feedback_map = r8;
- Register weak_value = r9;
- __ LoadP(weak_value, FieldMemOperand(r7, WeakCell::kValueOffset));
+ Register feedback_map = r9;
+ Register weak_value = r10;
+ __ LoadP(weak_value, FieldMemOperand(r8, WeakCell::kValueOffset));
__ cmp(r4, weak_value);
__ beq(&done);
- __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
+ __ CompareRoot(r8, Heap::kmegamorphic_symbolRootIndex);
__ beq(&done);
- __ LoadP(feedback_map, FieldMemOperand(r7, HeapObject::kMapOffset));
+ __ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ bne(FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
@@ -2609,8 +2621,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ bne(&miss);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
- __ cmp(r4, r7);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ cmp(r4, r8);
__ bne(&megamorphic);
__ b(&done);
}
@@ -2619,15 +2631,15 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
+ __ CompareRoot(r8, Heap::kuninitialized_symbolRootIndex);
__ beq(&initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ SmiToPtrArrayOffset(r7, r6);
- __ add(r7, r5, r7);
+ __ SmiToPtrArrayOffset(r8, r6);
+ __ add(r8, r5, r8);
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
- __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
+ __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
__ jmp(&done);
// An uninitialized cache is patched with the function
@@ -2635,22 +2647,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
if (!FLAG_pretenuring_call_new) {
// Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
- __ cmp(r4, r7);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ cmp(r4, r8);
__ bne(&not_array_function);
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ b(&done);
__ bind(&not_array_function);
}
CreateWeakCellStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ bind(&done);
}
@@ -2711,8 +2723,10 @@ static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{
FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(r4, r6);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ push(r4);
+ __ mr(r3, r6);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ pop(r4);
}
__ StoreP(r3, MemOperand(sp, argc * kPointerSize), r0);
@@ -2782,18 +2796,18 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r3 : number of arguments
// r4 : the function to call
// r5 : feedback vector
- // r6 : (only if r5 is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // r6 : slot in feedback vector (Smi, for RecordCallTarget)
+ // r7 : original constructor (for IsSuperConstructorCall)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r4, &non_function_call);
// Check that the function is a JSFunction.
- __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r4, r8, r8, JS_FUNCTION_TYPE);
__ bne(&slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ SmiToPtrArrayOffset(r8, r6);
__ add(r8, r5, r8);
@@ -2823,9 +2837,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// Pass function as original constructor.
if (IsSuperConstructorCall()) {
- __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
- __ addi(r7, r7, Operand(kPointerSize));
- __ LoadPX(r6, MemOperand(sp, r7));
+ __ mr(r6, r7);
} else {
__ mr(r6, r4);
}
@@ -2840,11 +2852,11 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r3: number of arguments
// r4: called object
- // r7: object type
+ // r8: object type
Label do_call;
__ bind(&slow);
STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu);
- __ cmpi(r7, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
__ bne(&non_function_call);
__ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ b(&do_call);
@@ -3076,11 +3088,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r4, r5, r6);
// Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
-
- ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
- __ CallExternalReference(miss, 3);
+ Runtime::FunctionId id = GetICState() == DEFAULT
+ ? Runtime::kCallIC_Miss
+ : Runtime::kCallIC_Customization_Miss;
+ __ CallRuntime(id, 3);
// Move result to r4 and exit the internal frame.
__ mr(r4, r3);
@@ -3184,12 +3195,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
- __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCode));
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
+ __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU));
__ ori(r0, r0, Operand(kSmiTagMask));
- __ and_(r0, code_, r0);
- __ cmpi(r0, Operand::Zero());
- __ bne(&slow_case_);
+ __ and_(r0, code_, r0, SetRC);
+ __ bne(&slow_case_, cr0);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged one-byte char code.
@@ -3465,7 +3475,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
__ bind(&single_char);
// r3: original string
@@ -3678,7 +3688,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -3956,12 +3966,10 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
STATIC_ASSERT(kInternalizedTag == 0);
__ orx(tmp3, tmp1, tmp2);
__ andi(r0, tmp3, Operand(kIsNotInternalizedMask));
- __ bne(&is_symbol, cr0);
// Make sure r3 is non-zero. At this point input operands are
// guaranteed to be non-zero.
DCHECK(right.is(r3));
- __ Ret();
- __ bind(&is_symbol);
+ __ Ret(eq, cr0);
}
// Check that both strings are sequential one-byte.
@@ -3984,7 +3992,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ bind(&miss);
@@ -4036,15 +4044,12 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
-
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r4, r3);
__ Push(r4, r3);
__ LoadSmiLiteral(r0, Smi::FromInt(op()));
__ push(r0);
- __ CallExternalReference(miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss, 3);
// Compute the entry point of the rewritten stub.
__ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -4201,8 +4206,8 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
__ srwi(scratch2, scratch2, Operand(Name::kHashShift));
__ and_(scratch2, scratch1, scratch2);
- // Scale the index by multiplying by the element size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ // Scale the index by multiplying by the entry size.
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ ShiftLeftImm(ip, scratch2, Operand(1));
__ add(scratch2, scratch2, ip);
@@ -4291,7 +4296,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ and_(index, mask, r0);
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ ShiftLeftImm(scratch, index, Operand(1));
__ add(index, index, scratch); // index *= 3.
@@ -4767,7 +4772,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- false, receiver, name, feedback,
+ receiver, name, feedback,
receiver_map, scratch1, r10);
__ bind(&miss);
@@ -5030,12 +5035,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// sp[0] - last argument
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
- DCHECK(FAST_SMI_ELEMENTS == 0);
- DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
- DCHECK(FAST_ELEMENTS == 2);
- DCHECK(FAST_HOLEY_ELEMENTS == 3);
- DCHECK(FAST_DOUBLE_ELEMENTS == 4);
- DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
__ andi(r0, r6, Operand(1));
@@ -5306,6 +5311,163 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context = cp;
+ Register result = r3;
+ Register slot = r5;
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ LoadP(result, ContextOperand(context, Context::PREVIOUS_INDEX));
+ context = result;
+ }
+
+ // Load the PropertyCell value at the specified slot.
+ __ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
+ __ add(result, context, r0);
+ __ LoadP(result, ContextOperand(result));
+ __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
+
+ // If the result is not the_hole, return. Otherwise, handle in the runtime.
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ __ Ret(ne);
+
+ // Fallback to runtime.
+ __ SmiTag(slot);
+ __ Push(slot);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+}
+
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register value = r3;
+ Register slot = r5;
+
+ Register cell = r4;
+ Register cell_details = r6;
+ Register cell_value = r7;
+ Register cell_value_map = r8;
+ Register scratch = r9;
+
+ Register context = cp;
+ Register context_temp = cell;
+
+ Label fast_heapobject_case, fast_smi_case, slow_case;
+
+ if (FLAG_debug_code) {
+ __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
+ __ Check(ne, kUnexpectedValue);
+ }
+
+ // Go up the context chain to the script context.
+ for (int i = 0; i < depth(); i++) {
+ __ LoadP(context_temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ context = context_temp;
+ }
+
+ // Load the PropertyCell at the specified slot.
+ __ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
+ __ add(cell, context, r0);
+ __ LoadP(cell, ContextOperand(cell));
+
+ // Load PropertyDetails for the cell (actually only the cell_type and kind).
+ __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
+ __ SmiUntag(cell_details);
+ __ andi(cell_details, cell_details,
+ Operand(PropertyDetails::PropertyCellTypeField::kMask |
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask));
+
+ // Check if PropertyCell holds mutable data.
+ Label not_mutable_data;
+ __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kMutable) |
+ PropertyDetails::KindField::encode(kData)));
+ __ bne(&not_mutable_data);
+ __ JumpIfSmi(value, &fast_smi_case);
+
+ __ bind(&fast_heapobject_case);
+ __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
+ // RecordWriteField clobbers the value register, so we copy it before the
+ // call.
+ __ mr(r6, value);
+ __ RecordWriteField(cell, PropertyCell::kValueOffset, r6, scratch,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ bind(&not_mutable_data);
+ // Check if PropertyCell value matches the new value (relevant for Constant,
+ // ConstantType and Undefined cells).
+ Label not_same_value;
+ __ LoadP(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
+ __ cmp(cell_value, value);
+ __ bne(&not_same_value);
+
+ // Make sure the PropertyCell is not marked READ_ONLY.
+ __ andi(r0, cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
+ __ bne(&slow_case, cr0);
+
+ if (FLAG_debug_code) {
+ Label done;
+ // This can only be true for Constant, ConstantType and Undefined cells,
+ // because we never store the_hole via this stub.
+ __ cmpi(cell_details,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstant) |
+ PropertyDetails::KindField::encode(kData)));
+ __ beq(&done);
+ __ cmpi(cell_details,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ beq(&done);
+ __ cmpi(cell_details,
+ Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kUndefined) |
+ PropertyDetails::KindField::encode(kData)));
+ __ Check(eq, kUnexpectedValue);
+ __ bind(&done);
+ }
+ __ Ret();
+ __ bind(&not_same_value);
+
+ // Check if PropertyCell contains data with constant type (and is not
+ // READ_ONLY).
+ __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ bne(&slow_case);
+
+ // Now either both old and new values must be smis or both must be heap
+ // objects with same map.
+ Label value_is_heap_object;
+ __ JumpIfNotSmi(value, &value_is_heap_object);
+ __ JumpIfNotSmi(cell_value, &slow_case);
+ // Old and new values are smis, no need for a write barrier here.
+ __ bind(&fast_smi_case);
+ __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
+ __ Ret();
+
+ __ bind(&value_is_heap_object);
+ __ JumpIfSmi(cell_value, &slow_case);
+
+ __ LoadP(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
+ __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ cmp(cell_value_map, scratch);
+ __ beq(&fast_heapobject_case);
+
+ // Fallback to runtime.
+ __ bind(&slow_case);
+ __ SmiTag(slot);
+ __ Push(slot, value);
+ __ TailCallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2, 1);
+}
+
+
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
index 3c71a23ea2..85f3c9ca98 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ b/deps/v8/src/ppc/code-stubs-ppc.h
@@ -5,6 +5,8 @@
#ifndef V8_PPC_CODE_STUBS_PPC_H_
#define V8_PPC_CODE_STUBS_PPC_H_
+#include "src/ppc/frames-ppc.h"
+
namespace v8 {
namespace internal {
@@ -181,7 +183,7 @@ class RecordWriteStub : public PlatformCodeStub {
masm->MultiPush(kJSCallerSaved & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
// Save all volatile FP registers except d0.
- masm->SaveFPRegs(sp, 1, DoubleRegister::kNumVolatileRegisters - 1);
+ masm->MultiPushDoubles(kCallerSavedDoubles & ~d0.bit());
}
}
@@ -189,7 +191,7 @@ class RecordWriteStub : public PlatformCodeStub {
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
// Restore all volatile FP registers except d0.
- masm->RestoreFPRegs(sp, 1, DoubleRegister::kNumVolatileRegisters - 1);
+ masm->MultiPopDoubles(kCallerSavedDoubles & ~d0.bit());
}
masm->MultiPop(kJSCallerSaved & ~scratch1_.bit());
masm->pop(r0);
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index aae38f4724..2238695587 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/codegen.h"
@@ -613,7 +611,7 @@ CodeAgingHelper::CodeAgingHelper() {
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
- SmartPointer<CodePatcher> patcher(new CodePatcher(
+ base::SmartPointer<CodePatcher> patcher(new CodePatcher(
young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
diff --git a/deps/v8/src/ppc/codegen-ppc.h b/deps/v8/src/ppc/codegen-ppc.h
index 500bf600f9..f8da74eaa6 100644
--- a/deps/v8/src/ppc/codegen-ppc.h
+++ b/deps/v8/src/ppc/codegen-ppc.h
@@ -12,9 +12,6 @@ namespace v8 {
namespace internal {
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
diff --git a/deps/v8/src/ppc/constants-ppc.cc b/deps/v8/src/ppc/constants-ppc.cc
index f019089eca..56147b3c48 100644
--- a/deps/v8/src/ppc/constants-ppc.cc
+++ b/deps/v8/src/ppc/constants-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/ppc/constants-ppc.h"
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index 6960a7aa1e..b304bad7ce 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -5,6 +5,12 @@
#ifndef V8_PPC_CONSTANTS_PPC_H_
#define V8_PPC_CONSTANTS_PPC_H_
+#include <stdint.h>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/globals.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ppc/cpu-ppc.cc b/deps/v8/src/ppc/cpu-ppc.cc
index 1a9390b333..079aa23403 100644
--- a/deps/v8/src/ppc/cpu-ppc.cc
+++ b/deps/v8/src/ppc/cpu-ppc.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
// CPU specific code for ppc independent of OS goes here.
-#include "src/v8.h"
#if V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/debug-ppc.cc b/deps/v8/src/ppc/debug-ppc.cc
deleted file mode 100644
index 9e734452b8..0000000000
--- a/deps/v8/src/ppc/debug-ppc.cc
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_PPC
-
-#include "src/codegen.h"
-#include "src/debug.h"
-
-namespace v8 {
-namespace internal {
-
-void BreakLocation::SetDebugBreakAtReturn() {
- // Patch the code changing the return from JS function sequence from
- //
- // LeaveFrame
- // blr
- //
- // to a call to the debug break return code.
- // this uses a FIXED_SEQUENCE to load an address constant
- //
- // mov r0, <address>
- // mtlr r0
- // blrl
- // bkpt
- //
- CodePatcher patcher(pc(), Assembler::kJSReturnSequenceInstructions);
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
- patcher.masm()->mov(
- v8::internal::r0,
- Operand(reinterpret_cast<intptr_t>(debug_info_->GetIsolate()
- ->builtins()
- ->Return_DebugBreak()
- ->entry())));
- patcher.masm()->mtctr(v8::internal::r0);
- patcher.masm()->bctrl();
- patcher.masm()->bkpt(0);
-}
-
-
-void BreakLocation::SetDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- // Patch the code changing the debug break slot code from
- //
- // ori r3, r3, 0
- // ori r3, r3, 0
- // ori r3, r3, 0
- // ori r3, r3, 0
- // ori r3, r3, 0
- //
- // to a call to the debug break code, using a FIXED_SEQUENCE.
- //
- // mov r0, <address>
- // mtlr r0
- // blrl
- //
- CodePatcher patcher(pc(), Assembler::kDebugBreakSlotInstructions);
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
- patcher.masm()->mov(
- v8::internal::r0,
- Operand(reinterpret_cast<intptr_t>(
- debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry())));
- patcher.masm()->mtctr(v8::internal::r0);
- patcher.masm()->bctrl();
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs) {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
- for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
- __ push(ip);
- }
- __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
- __ push(ip);
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- DCHECK((object_regs & ~kJSCallerSaved) == 0);
- DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
- DCHECK((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = {r};
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ TestUnsignedSmiCandidate(reg, r0);
- __ Assert(eq, kUnableToEncodeValueAsSmi, cr0);
- }
- __ SmiTag(reg);
- }
- }
- __ MultiPush(object_regs | non_object_regs);
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ mov(r3, Operand::Zero()); // no arguments
- __ mov(r4, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(masm->isolate(), 1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ MultiPop(object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = {r};
- if ((non_object_regs & (1 << r)) != 0) {
- __ SmiUntag(reg);
- }
- if (FLAG_debug_code &&
- (((object_regs | non_object_regs) & (1 << r)) == 0)) {
- __ mov(reg, Operand(kDebugZapValue));
- }
- }
- }
-
- // Don't bother removing padding bytes pushed on the stack
- // as the frame is going to be restored right away.
-
- // Leave the internal frame.
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ mov(ip, Operand(after_break_target));
- __ LoadP(ip, MemOperand(ip));
- __ JumpToJSEntry(ip);
-}
-
-
-void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallICStub
- // ----------- S t a t e -------------
- // -- r4 : function
- // -- r6 : slot in feedback array (smi)
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r4.bit() | r6.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // In places other than IC call sites it is expected that r3 is TOS which
- // is an object - this is not generally the case so this should be used with
- // care.
- Generate_DebugBreakCallHelper(masm, r3.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-ppc.cc).
- // ----------- S t a t e -------------
- // -- r4 : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r4.bit(), 0);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-ppc.cc)
- // ----------- S t a t e -------------
- // -- r3 : number of arguments (not smi)
- // -- r4 : constructor function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r4.bit(), r3.bit());
-}
-
-
-void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
- MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-ppc.cc)
- // ----------- S t a t e -------------
- // -- r3 : number of arguments (not smi)
- // -- r4 : constructor function
- // -- r5 : feedback array
- // -- r6 : feedback slot (smi)
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r4.bit() | r5.bit() | r6.bit(), r3.bit());
-}
-
-
-void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction. Avoid emitting
- // the trampoline pool in the debug break slot code.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
- __ nop(MacroAssembler::DEBUG_BREAK_NOP);
- }
- DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
- masm->InstructionsGeneratedSince(&check_codesize));
-}
-
-
-void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0);
-}
-
-
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ mov(ip, Operand(restarter_frame_function_slot));
- __ li(r4, Operand::Zero());
- __ StoreP(r4, MemOperand(ip, 0));
-
- // Load the function pointer off of our current stack frame.
- __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset -
- kPointerSize));
-
- // Pop return address and frame
- __ LeaveFrame(StackFrame::INTERNAL);
-
- // Load context from the function.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
-
- // Get function code.
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Re-run JSFunction, r4 is function, cp is context.
- __ Jump(ip);
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 0db074d694..3e4511f78f 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/safepoint-table.h"
namespace v8 {
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 02ef88bc19..5d7de8a0b4 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -28,8 +28,6 @@
#include <stdio.h>
#include <string.h>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/base/platform/platform.h"
@@ -243,6 +241,14 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
return 1;
}
+ case 'c': { // 'cr: condition register of branch instruction
+ int code = instr->Bits(20, 18);
+ if (code != 7) {
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, " cr%d", code);
+ }
+ return 2;
+ }
case 't': { // 'target: target of branch instructions
// target26 or target16
DCHECK(STRING_STARTS_WITH(format, "target"));
@@ -366,7 +372,10 @@ void Decoder::DecodeExt1(Instruction* instr) {
break;
}
case BCLRX: {
- switch (instr->Bits(25, 21) << 21) {
+ int bo = instr->Bits(25, 21) << 21;
+ int bi = instr->Bits(20, 16);
+ CRBit cond = static_cast<CRBit>(bi & (CRWIDTH - 1));
+ switch (bo) {
case DCBNZF: {
UnknownFormat(instr, "bclrx-dcbnzf");
break;
@@ -376,7 +385,20 @@ void Decoder::DecodeExt1(Instruction* instr) {
break;
}
case BF: {
- UnknownFormat(instr, "bclrx-bf");
+ switch (cond) {
+ case CR_EQ:
+ Format(instr, "bnelr'l'cr");
+ break;
+ case CR_GT:
+ Format(instr, "blelr'l'cr");
+ break;
+ case CR_LT:
+ Format(instr, "bgelr'l'cr");
+ break;
+ case CR_SO:
+ Format(instr, "bnsolr'l'cr");
+ break;
+ }
break;
}
case DCBNZT: {
@@ -388,7 +410,20 @@ void Decoder::DecodeExt1(Instruction* instr) {
break;
}
case BT: {
- UnknownFormat(instr, "bclrx-bt");
+ switch (cond) {
+ case CR_EQ:
+ Format(instr, "beqlr'l'cr");
+ break;
+ case CR_GT:
+ Format(instr, "bgtlr'l'cr");
+ break;
+ case CR_LT:
+ Format(instr, "bltlr'l'cr");
+ break;
+ case CR_SO:
+ Format(instr, "bsolr'l'cr");
+ break;
+ }
break;
}
case DCBNZ: {
@@ -400,11 +435,7 @@ void Decoder::DecodeExt1(Instruction* instr) {
break;
}
case BA: {
- if (instr->Bit(0) == 1) {
- Format(instr, "blrl");
- } else {
- Format(instr, "blr");
- }
+ Format(instr, "blr'l");
break;
}
}
@@ -1062,43 +1093,48 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
case BCX: {
int bo = instr->Bits(25, 21) << 21;
int bi = instr->Bits(20, 16);
- switch (bi) {
- case 2:
- case 30:
- if (BT == bo) {
- Format(instr, "beq'l'a 'target16");
- break;
- }
- if (BF == bo) {
- Format(instr, "bne'l'a 'target16");
- break;
+ CRBit cond = static_cast<CRBit>(bi & (CRWIDTH - 1));
+ switch (bo) {
+ case BT: { // Branch if condition true
+ switch (cond) {
+ case CR_EQ:
+ Format(instr, "beq'l'a'cr 'target16");
+ break;
+ case CR_GT:
+ Format(instr, "bgt'l'a'cr 'target16");
+ break;
+ case CR_LT:
+ Format(instr, "blt'l'a'cr 'target16");
+ break;
+ case CR_SO:
+ Format(instr, "bso'l'a'cr 'target16");
+ break;
}
- Format(instr, "bc'l'a 'target16");
break;
- case 29:
- if (BT == bo) {
- Format(instr, "bgt'l'a 'target16");
- break;
- }
- if (BF == bo) {
- Format(instr, "ble'l'a 'target16");
- break;
+ }
+ case BF: { // Branch if condition false
+ switch (cond) {
+ case CR_EQ:
+ Format(instr, "bne'l'a'cr 'target16");
+ break;
+ case CR_GT:
+ Format(instr, "ble'l'a'cr 'target16");
+ break;
+ case CR_LT:
+ Format(instr, "bge'l'a'cr 'target16");
+ break;
+ case CR_SO:
+ Format(instr, "bnso'l'a'cr 'target16");
+ break;
}
- Format(instr, "bc'l'a 'target16");
break;
- case 28:
- if (BT == bo) {
- Format(instr, "blt'l'a 'target16");
- break;
- }
- if (BF == bo) {
- Format(instr, "bge'l'a 'target16");
- break;
- }
- Format(instr, "bc'l'a 'target16");
+ }
+ case DCBNZ: { // Decrement CTR; branch if CTR != 0
+ Format(instr, "bdnz'l'a 'target16");
break;
+ }
default:
- Format(instr, "bc'l'a 'target16");
+ Format(instr, "bc'l'a'cr 'target16");
break;
}
break;
diff --git a/deps/v8/src/ppc/frames-ppc.cc b/deps/v8/src/ppc/frames-ppc.cc
index 1e54c46963..4437a167fc 100644
--- a/deps/v8/src/ppc/frames-ppc.cc
+++ b/deps/v8/src/ppc/frames-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/assembler.h"
diff --git a/deps/v8/src/ppc/frames-ppc.h b/deps/v8/src/ppc/frames-ppc.h
index 0357731b4b..d5b6d3caa9 100644
--- a/deps/v8/src/ppc/frames-ppc.h
+++ b/deps/v8/src/ppc/frames-ppc.h
@@ -55,6 +55,43 @@ const RegList kCalleeSaved = 1 << 14 | // r14
const int kNumCalleeSaved = 18;
+const RegList kCallerSavedDoubles = 1 << 0 | // d0
+ 1 << 1 | // d1
+ 1 << 2 | // d2
+ 1 << 3 | // d3
+ 1 << 4 | // d4
+ 1 << 5 | // d5
+ 1 << 6 | // d6
+ 1 << 7 | // d7
+ 1 << 8 | // d8
+ 1 << 9 | // d9
+ 1 << 10 | // d10
+ 1 << 11 | // d11
+ 1 << 12 | // d12
+ 1 << 13; // d13
+
+const RegList kCalleeSavedDoubles = 1 << 14 | // d14
+ 1 << 15 | // d15
+ 1 << 16 | // d16
+ 1 << 17 | // d17
+ 1 << 18 | // d18
+ 1 << 19 | // d19
+ 1 << 20 | // d20
+ 1 << 21 | // d21
+ 1 << 22 | // d22
+ 1 << 23 | // d23
+ 1 << 24 | // d24
+ 1 << 25 | // d25
+ 1 << 26 | // d26
+ 1 << 27 | // d27
+ 1 << 28 | // d28
+ 1 << 29 | // d29
+ 1 << 30 | // d30
+ 1 << 31; // d31
+
+const int kNumCalleeSavedDoubles = 18;
+
+
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
const int kNumSafepointRegisters = 32;
@@ -148,12 +185,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
}
} // namespace v8::internal
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 2d0c55fd28..9f8292f96b 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/interface-descriptors.h"
@@ -36,7 +34,11 @@ const Register VectorStoreICDescriptor::VectorRegister() { return r6; }
const Register StoreTransitionDescriptor::MapRegister() { return r6; }
-const Register ElementTransitionAndStoreDescriptor::MapRegister() { return r6; }
+const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r5; }
+
+
+const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r5; }
+const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
const Register InstanceofDescriptor::left() { return r3; }
@@ -62,6 +64,14 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
+void StoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5};
@@ -83,6 +93,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
}
+// static
+const Register ToObjectDescriptor::ReceiverRegister() { return r3; }
+
+
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
@@ -158,11 +172,11 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// r3 : number of arguments
// r4 : the function to call
// r5 : feedback vector
- // r6 : (only if r5 is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // r6 : slot in feedback vector (Smi, for RecordCallTarget)
+ // r7 : original constructor (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {r3, r4, r5};
+ Register registers[] = {r3, r4, r7, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -329,11 +343,22 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
+void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r4, // math rounding function
+ r6, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void MathRoundVariantCallFromOptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r4, // math rounding function
r6, // vector slot id
+ r7, // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.cc b/deps/v8/src/ppc/lithium-codegen-ppc.cc
index 8f4cd4637a..de416b3fdb 100644
--- a/deps/v8/src/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/ppc/lithium-codegen-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -107,7 +105,7 @@ bool LCodeGen::GeneratePrologue() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop_at");
}
#endif
@@ -417,6 +415,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
+ AllowDeferredHandleDereference get_number;
DCHECK(literal->IsNumber());
__ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
} else if (r.IsDouble()) {
@@ -613,15 +612,23 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
if (op->IsStackSlot()) {
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
if (is_tagged) {
- translation->StoreStackSlot(op->index());
+ translation->StoreStackSlot(index);
} else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
+ translation->StoreUint32StackSlot(index);
} else {
- translation->StoreInt32StackSlot(op->index());
+ translation->StoreInt32StackSlot(index);
}
} else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
+ translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -2304,6 +2311,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ beq(instr->TrueLabel(chunk_));
}
+ if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ // SIMD value -> true.
+ Label not_simd;
+ __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
+ __ beq(instr->TrueLabel(chunk_));
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
@@ -3046,13 +3060,31 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
- PREMONOMORPHIC).code();
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+ SLOPPY, PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->result()).is(r3));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub =
+ CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3165,7 +3197,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3269,10 +3301,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
DoubleRegister result = ToDoubleRegister(instr->result());
if (key_is_constant) {
__ Add(scratch0(), external_pointer, constant_key << element_size_shift,
@@ -3281,8 +3310,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
__ add(scratch0(), external_pointer, r0);
}
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
__ lfs(result, MemOperand(scratch0(), base_offset));
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ lfd(result, MemOperand(scratch0(), base_offset));
@@ -3293,7 +3321,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
constant_key, element_size_shift, base_offset);
switch (elements_kind) {
- case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
if (key_is_constant) {
__ LoadByte(result, mem_operand, r0);
@@ -3302,8 +3329,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
}
__ extsb(result, result);
break;
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
if (key_is_constant) {
@@ -3312,7 +3337,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ lbzx(result, mem_operand);
}
break;
- case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
if (key_is_constant) {
__ LoadHalfWordArith(result, mem_operand, r0);
@@ -3320,7 +3344,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ lhax(result, mem_operand);
}
break;
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
if (key_is_constant) {
__ LoadHalfWord(result, mem_operand, r0);
@@ -3328,7 +3351,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ lhzx(result, mem_operand);
}
break;
- case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
if (key_is_constant) {
__ LoadWordArith(result, mem_operand, r0);
@@ -3336,7 +3358,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ lwax(result, mem_operand);
}
break;
- case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
if (key_is_constant) {
__ LoadWord(result, mem_operand, r0);
@@ -3351,8 +3372,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
@@ -3489,7 +3508,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3750,12 +3769,11 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- __ push(cp); // The context is the first argument.
__ Move(scratch0(), instr->hydrogen()->pairs());
__ push(scratch0());
__ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
__ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}
@@ -4456,6 +4474,29 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(cp));
+ DCHECK(ToRegister(instr->value())
+ .is(StoreGlobalViaContextDescriptor::ValueRegister()));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
+ Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
+ isolate(), depth, instr->language_mode()).code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ push(StoreGlobalViaContextDescriptor::ValueRegister());
+ __ CallRuntime(is_strict(instr->language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Representation representation = instr->hydrogen()->length()->representation();
DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
@@ -4517,10 +4558,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
int base_offset = instr->base_offset();
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
Register address = scratch0();
DoubleRegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
@@ -4534,8 +4572,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
__ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
__ add(address, external_pointer, r0);
}
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
__ frsp(double_scratch0(), value);
__ stfs(double_scratch0(), MemOperand(address, base_offset));
} else { // Storing doubles, not floats.
@@ -4547,9 +4584,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
constant_key, element_size_shift, base_offset);
switch (elements_kind) {
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_INT8_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case INT8_ELEMENTS:
@@ -4559,8 +4593,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
__ stbx(value, mem_operand);
}
break;
- case EXTERNAL_INT16_ELEMENTS:
- case EXTERNAL_UINT16_ELEMENTS:
case INT16_ELEMENTS:
case UINT16_ELEMENTS:
if (key_is_constant) {
@@ -4569,8 +4601,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
__ sthx(value, mem_operand);
}
break;
- case EXTERNAL_INT32_ELEMENTS:
- case EXTERNAL_UINT32_ELEMENTS:
case INT32_ELEMENTS:
case UINT32_ELEMENTS:
if (key_is_constant) {
@@ -4581,8 +4611,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -4700,7 +4728,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases: external, fast double
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -5916,11 +5944,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
} else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
- __ bge(false_label);
- __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ ExtractBit(r0, scratch, Map::kIsUndetectable);
- __ cmpi(r0, Operand::Zero());
- final_branch_condition = eq;
+ final_branch_condition = lt;
} else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label);
@@ -5966,6 +5990,17 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
__ cmpi(r0, Operand::Zero());
final_branch_condition = eq;
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(type_name, factory->type##_string())) { \
+ __ JumpIfSmi(input, false_label); \
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
+ __ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
+ final_branch_condition = eq;
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+
} else {
__ b(false_label);
}
diff --git a/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc b/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc
index 3528bf53f9..16fb665dda 100644
--- a/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc
+++ b/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/ppc/lithium-codegen-ppc.h"
#include "src/ppc/lithium-gap-resolver-ppc.h"
diff --git a/deps/v8/src/ppc/lithium-gap-resolver-ppc.h b/deps/v8/src/ppc/lithium-gap-resolver-ppc.h
index 1b006db6af..7741080e55 100644
--- a/deps/v8/src/ppc/lithium-gap-resolver-ppc.h
+++ b/deps/v8/src/ppc/lithium-gap-resolver-ppc.h
@@ -5,8 +5,6 @@
#ifndef V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
#define V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
-#include "src/v8.h"
-
#include "src/lithium.h"
namespace v8 {
diff --git a/deps/v8/src/ppc/lithium-ppc.cc b/deps/v8/src/ppc/lithium-ppc.cc
index 4f15a60d5d..6841db5d32 100644
--- a/deps/v8/src/ppc/lithium-ppc.cc
+++ b/deps/v8/src/ppc/lithium-ppc.cc
@@ -4,8 +4,6 @@
#include <sstream>
-#include "src/v8.h"
-
#include "src/hydrogen-osr.h"
#include "src/lithium-inl.h"
#include "src/ppc/lithium-codegen-ppc.h"
@@ -343,6 +341,11 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
+void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d", depth(), slot_index());
+}
+
+
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -361,6 +364,12 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
+ value()->PrintTo(stream);
+}
+
+
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -1631,8 +1640,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsExternal()) {
- DCHECK(instr->left()->representation().IsExternal());
- DCHECK(instr->right()->representation().IsInteger32());
+ DCHECK(instr->IsConsistentExternalRepresentation());
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
@@ -2108,6 +2116,15 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
+ HLoadGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ DCHECK(instr->slot_index() > 0);
+ LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
+ return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2176,7 +2193,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LInstruction* result = NULL;
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseRegister(instr->elements());
@@ -2194,10 +2211,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
bool needs_environment;
- if (instr->is_external() || instr->is_fixed_typed_array()) {
+ if (instr->is_fixed_typed_array()) {
// see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
- elements_kind == UINT32_ELEMENTS) &&
+ needs_environment = elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32);
} else {
// see LCodeGen::DoLoadKeyedFixedDoubleArray and
@@ -2231,7 +2247,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
@@ -2261,10 +2277,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
- DCHECK((instr->is_fixed_typed_array() &&
- instr->elements()->representation().IsTagged()) ||
- (instr->is_external() &&
- instr->elements()->representation().IsExternal()));
+ DCHECK(instr->elements()->representation().IsExternal());
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
@@ -2388,6 +2401,19 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
+ HStoreGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), cp);
+ LOperand* value = UseFixed(instr->value(),
+ StoreGlobalViaContextDescriptor::ValueRegister());
+ DCHECK(instr->slot_index() > 0);
+
+ LStoreGlobalViaContext* result =
+ new (zone()) LStoreGlobalViaContext(context, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r4);
diff --git a/deps/v8/src/ppc/lithium-ppc.h b/deps/v8/src/ppc/lithium-ppc.h
index 853a6240d0..99ff9fedb7 100644
--- a/deps/v8/src/ppc/lithium-ppc.h
+++ b/deps/v8/src/ppc/lithium-ppc.h
@@ -102,6 +102,7 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
+ V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -143,6 +144,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
+ V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1585,13 +1587,9 @@ class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
- bool is_external() const { return hydrogen()->is_external(); }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1638,7 +1636,23 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
+ TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ LOperand* context() { return inputs_[0]; }
+
+ int depth() const { return hydrogen()->depth(); }
+ int slot_index() const { return hydrogen()->slot_index(); }
};
@@ -2118,6 +2132,28 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
+class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreGlobalViaContext(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
+ "store-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int depth() { return hydrogen()->depth(); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
@@ -2126,13 +2162,9 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
inputs_[2] = value;
}
- bool is_external() const { return hydrogen()->is_external(); }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 7cd895583a..676cb2c60e 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -5,8 +5,6 @@
#include <assert.h> // For assert
#include <limits.h> // For LONG_MIN, LONG_MAX.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/base/bits.h"
@@ -14,7 +12,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -176,26 +174,13 @@ void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
-void MacroAssembler::Ret(Condition cond) {
- DCHECK(cond == al);
- blr();
-}
-
-
-void MacroAssembler::Drop(int count, Condition cond) {
- DCHECK(cond == al);
+void MacroAssembler::Drop(int count) {
if (count > 0) {
Add(sp, sp, count * kPointerSize, r0);
}
}
-void MacroAssembler::Ret(int drop, Condition cond) {
- Drop(drop, cond);
- Ret(cond);
-}
-
-
void MacroAssembler::Call(Label* target) { b(target, SetLK); }
@@ -237,30 +222,59 @@ void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
}
-void MacroAssembler::MultiPush(RegList regs) {
+void MacroAssembler::MultiPush(RegList regs, Register location) {
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kPointerSize;
- subi(sp, sp, Operand(stack_offset));
- for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ subi(location, location, Operand(stack_offset));
+ for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kPointerSize;
- StoreP(ToRegister(i), MemOperand(sp, stack_offset));
+ StoreP(ToRegister(i), MemOperand(location, stack_offset));
}
}
}
-void MacroAssembler::MultiPop(RegList regs) {
+void MacroAssembler::MultiPop(RegList regs, Register location) {
int16_t stack_offset = 0;
- for (int16_t i = 0; i < kNumRegisters; i++) {
+ for (int16_t i = 0; i < Register::kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
- LoadP(ToRegister(i), MemOperand(sp, stack_offset));
+ LoadP(ToRegister(i), MemOperand(location, stack_offset));
stack_offset += kPointerSize;
}
}
- addi(sp, sp, Operand(stack_offset));
+ addi(location, location, Operand(stack_offset));
+}
+
+
+void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
+ int16_t num_to_push = NumberOfBitsSet(dregs);
+ int16_t stack_offset = num_to_push * kDoubleSize;
+
+ subi(location, location, Operand(stack_offset));
+ for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
+ if ((dregs & (1 << i)) != 0) {
+ DoubleRegister dreg = DoubleRegister::from_code(i);
+ stack_offset -= kDoubleSize;
+ stfd(dreg, MemOperand(location, stack_offset));
+ }
+ }
+}
+
+
+void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
+ if ((dregs & (1 << i)) != 0) {
+ DoubleRegister dreg = DoubleRegister::from_code(i);
+ lfd(dreg, MemOperand(location, stack_offset));
+ stack_offset += kDoubleSize;
+ }
+ }
+ addi(location, location, Operand(stack_offset));
}
@@ -497,7 +511,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
beq(&done, cr0);
} else {
DCHECK(and_then == kReturnAtEnd);
- beq(&done, cr0);
+ Ret(eq, cr0);
}
mflr(r0);
push(r0);
@@ -838,7 +852,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Optionally save all volatile double registers.
if (save_doubles) {
- SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
+ MultiPushDoubles(kCallerSavedDoubles);
// Note that d0 will be accessible at
// fp - ExitFrameConstants::kFrameSize -
// kNumVolatileRegisters * kDoubleSize,
@@ -904,7 +918,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
const int offset =
(ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize);
addi(r6, fp, Operand(-offset));
- RestoreFPRegs(r6, 0, kNumRegs);
+ MultiPopDoubles(kCallerSavedDoubles, r6);
}
// Clear top frame.
@@ -1147,10 +1161,11 @@ void MacroAssembler::IsObjectNameType(Register object, Register scratch,
void MacroAssembler::DebugBreak() {
li(r3, Operand::Zero());
- mov(r4, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+ mov(r4,
+ Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
CEntryStub ces(isolate(), 1);
DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
@@ -1583,26 +1598,6 @@ void MacroAssembler::Allocate(Register object_size, Register result,
}
-void MacroAssembler::UndoAllocationInNewSpace(Register object,
- Register scratch) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- ClearRightImm(object, object, Operand(kHeapObjectTagSize));
-#ifdef DEBUG
- // Check that the object un-allocated is below the current top.
- mov(scratch, Operand(new_space_allocation_top));
- LoadP(scratch, MemOperand(scratch));
- cmp(object, scratch);
- Check(lt, kUndoAllocationOfNonAllocatedMemory);
-#endif
- // Write the address of the object to un-allocate as the current top.
- mov(scratch, Operand(new_space_allocation_top));
- StoreP(object, MemOperand(scratch));
-}
-
-
void MacroAssembler::AllocateTwoByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3,
@@ -2987,28 +2982,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
}
-void MacroAssembler::SaveFPRegs(Register location, int first, int count) {
- DCHECK(count > 0);
- int cur = first;
- subi(location, location, Operand(count * kDoubleSize));
- for (int i = 0; i < count; i++) {
- DoubleRegister reg = DoubleRegister::from_code(cur++);
- stfd(reg, MemOperand(location, i * kDoubleSize));
- }
-}
-
-
-void MacroAssembler::RestoreFPRegs(Register location, int first, int count) {
- DCHECK(count > 0);
- int cur = first + count - 1;
- for (int i = count - 1; i >= 0; i--) {
- DoubleRegister reg = DoubleRegister::from_code(cur--);
- lfd(reg, MemOperand(location, i * kDoubleSize));
- }
- addi(location, location, Operand(count * kDoubleSize));
-}
-
-
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
@@ -4557,23 +4530,35 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
Register scratch1,
Label* found) {
DCHECK(!scratch1.is(scratch0));
- Factory* factory = isolate()->factory();
Register current = scratch0;
- Label loop_again;
+ Label loop_again, end;
// scratch contained elements pointer.
mr(current, object);
+ LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ CompareRoot(current, Heap::kNullValueRootIndex);
+ beq(&end);
// Loop based on the map going up the prototype chain.
bind(&loop_again);
LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
+
+ STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ lbz(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
+ cmpi(scratch1, Operand(JS_OBJECT_TYPE));
+ blt(found);
+
lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
beq(found);
LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
- Cmpi(current, Operand(factory->null_value()), r0);
+ CompareRoot(current, Heap::kNullValueRootIndex);
bne(&loop_again);
+
+ bind(&end);
}
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index c0992c9171..64396bb3a4 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -13,6 +13,19 @@
namespace v8 {
namespace internal {
+// Give alias names to registers for calling conventions.
+const Register kReturnRegister0 = {kRegister_r3_Code};
+const Register kReturnRegister1 = {kRegister_r4_Code};
+const Register kJSFunctionRegister = {kRegister_r4_Code};
+const Register kContextRegister = {kRegister_r30_Code};
+const Register kInterpreterAccumulatorRegister = {kRegister_r3_Code};
+const Register kInterpreterRegisterFileRegister = {kRegister_r14_Code};
+const Register kInterpreterBytecodeOffsetRegister = {kRegister_r15_Code};
+const Register kInterpreterBytecodeArrayRegister = {kRegister_r16_Code};
+const Register kInterpreterDispatchTableRegister = {kRegister_r17_Code};
+const Register kRuntimeCallFunctionRegister = {kRegister_r4_Code};
+const Register kRuntimeCallArgCountRegister = {kRegister_r3_Code};
+
// ----------------------------------------------------------------------------
// Static helper functions
@@ -127,13 +140,17 @@ class MacroAssembler : public Assembler {
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
- void Ret(Condition cond = al);
+ void Ret() { blr(); }
+ void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
- void Drop(int count, Condition cond = al);
+ void Drop(int count);
- void Ret(int drop, Condition cond = al);
+ void Ret(int drop) {
+ Drop(drop);
+ blr();
+ }
void Call(Label* target);
@@ -148,8 +165,11 @@ class MacroAssembler : public Assembler {
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
- void MultiPush(RegList regs);
- void MultiPop(RegList regs);
+ void MultiPush(RegList regs, Register location = sp);
+ void MultiPop(RegList regs, Register location = sp);
+
+ void MultiPushDoubles(RegList dregs, Register location = sp);
+ void MultiPopDoubles(RegList dregs, Register location = sp);
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index,
@@ -213,7 +233,7 @@ class MacroAssembler : public Assembler {
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
@@ -618,13 +638,6 @@ class MacroAssembler : public Assembler {
void Allocate(Register object_size, Register result, Register scratch1,
Register scratch2, Label* gc_required, AllocationFlags flags);
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. The caller must make sure that no pointers
- // are left to the object(s) no longer allocated as they would be invalid when
- // allocation is undone.
- void UndoAllocationInNewSpace(Register object, Register scratch);
-
-
void AllocateTwoByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
@@ -891,12 +904,6 @@ class MacroAssembler : public Assembler {
bind(&label);
}
- // Pushes <count> double values to <location>, starting from d<first>.
- void SaveFPRegs(Register location, int first, int count);
-
- // Pops <count> double values from <location>, starting from d<first>.
- void RestoreFPRegs(Register location, int first, int count);
-
// ---------------------------------------------------------------------------
// Runtime calls
@@ -1533,7 +1540,7 @@ class CodePatcher {
enum FlushICache { FLUSH, DONT_FLUSH };
CodePatcher(byte* address, int instructions, FlushICache flush_cache = FLUSH);
- virtual ~CodePatcher();
+ ~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
@@ -1556,7 +1563,7 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
-inline MemOperand ContextOperand(Register context, int index) {
+inline MemOperand ContextOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 261982c0b3..518f8fae75 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -6,8 +6,6 @@
#include <stdlib.h>
#include <cmath>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
#include "src/assembler.h"
@@ -1108,8 +1106,15 @@ void Simulator::WriteDW(intptr_t addr, int64_t value) {
// Returns the limit of the stack area to enable checking for stack overflows.
-uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin to prevent overrunning the stack when pushing values.
+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
+ // The simulator uses a separate JS stack. If we have exhausted the C stack,
+ // we also drop down the JS limit to reflect the exhaustion on the JS stack.
+ if (GetCurrentStackPosition() < c_limit) {
+ return reinterpret_cast<uintptr_t>(get_sp());
+ }
+
+ // Otherwise the limit is the JS stack. Leave a safety margin to prevent
+ // overrunning the stack when pushing values.
return reinterpret_cast<uintptr_t>(stack_) + stack_protection_size_;
}
@@ -1562,9 +1567,8 @@ void Simulator::SetCR0(intptr_t result, bool setSO) {
}
-void Simulator::ExecuteBranchConditional(Instruction* instr) {
+void Simulator::ExecuteBranchConditional(Instruction* instr, BCType type) {
int bo = instr->Bits(25, 21) << 21;
- int offset = (instr->Bits(15, 2) << 18) >> 16;
int condition_bit = instr->Bits(20, 16);
int condition_mask = 0x80000000 >> condition_bit;
switch (bo) {
@@ -1572,46 +1576,47 @@ void Simulator::ExecuteBranchConditional(Instruction* instr) {
case DCBEZF: // Decrement CTR; branch if CTR == 0 and condition false
UNIMPLEMENTED();
case BF: { // Branch if condition false
- if (!(condition_reg_ & condition_mask)) {
- if (instr->Bit(0) == 1) { // LK flag set
- special_reg_lr_ = get_pc() + 4;
- }
- set_pc(get_pc() + offset);
- }
+ if (condition_reg_ & condition_mask) return;
break;
}
case DCBNZT: // Decrement CTR; branch if CTR != 0 and condition true
case DCBEZT: // Decrement CTR; branch if CTR == 0 and condition true
UNIMPLEMENTED();
case BT: { // Branch if condition true
- if (condition_reg_ & condition_mask) {
- if (instr->Bit(0) == 1) { // LK flag set
- special_reg_lr_ = get_pc() + 4;
- }
- set_pc(get_pc() + offset);
- }
+ if (!(condition_reg_ & condition_mask)) return;
break;
}
case DCBNZ: // Decrement CTR; branch if CTR != 0
case DCBEZ: // Decrement CTR; branch if CTR == 0
special_reg_ctr_ -= 1;
- if ((special_reg_ctr_ == 0) == (bo == DCBEZ)) {
- if (instr->Bit(0) == 1) { // LK flag set
- special_reg_lr_ = get_pc() + 4;
- }
- set_pc(get_pc() + offset);
- }
+ if ((special_reg_ctr_ == 0) != (bo == DCBEZ)) return;
break;
case BA: { // Branch always
- if (instr->Bit(0) == 1) { // LK flag set
- special_reg_lr_ = get_pc() + 4;
- }
- set_pc(get_pc() + offset);
break;
}
default:
UNIMPLEMENTED(); // Invalid encoding
}
+
+ intptr_t old_pc = get_pc();
+
+ switch (type) {
+ case BC_OFFSET: {
+ int offset = (instr->Bits(15, 2) << 18) >> 16;
+ set_pc(old_pc + offset);
+ break;
+ }
+ case BC_LINK_REG:
+ set_pc(special_reg_lr_);
+ break;
+ case BC_CTR_REG:
+ set_pc(special_reg_ctr_);
+ break;
+ }
+
+ if (instr->Bit(0) == 1) { // LK flag set
+ special_reg_lr_ = old_pc + 4;
+ }
}
@@ -1620,24 +1625,12 @@ void Simulator::ExecuteExt1(Instruction* instr) {
switch (instr->Bits(10, 1) << 1) {
case MCRF:
UNIMPLEMENTED(); // Not used by V8.
- case BCLRX: {
- // need to check BO flag
- intptr_t old_pc = get_pc();
- set_pc(special_reg_lr_);
- if (instr->Bit(0) == 1) { // LK flag set
- special_reg_lr_ = old_pc + 4;
- }
+ case BCLRX:
+ ExecuteBranchConditional(instr, BC_LINK_REG);
break;
- }
- case BCCTRX: {
- // need to check BO flag
- intptr_t old_pc = get_pc();
- set_pc(special_reg_ctr_);
- if (instr->Bit(0) == 1) { // LK flag set
- special_reg_lr_ = old_pc + 4;
- }
+ case BCCTRX:
+ ExecuteBranchConditional(instr, BC_CTR_REG);
break;
- }
case CRNOR:
case RFI:
case CRANDC:
@@ -3262,7 +3255,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
case BCX: {
- ExecuteBranchConditional(instr);
+ ExecuteBranchConditional(instr, BC_OFFSET);
break;
}
case BX: {
@@ -3712,6 +3705,9 @@ void Simulator::Execute() {
void Simulator::CallInternal(byte* entry) {
+ // Adjust JS-based stack limit to C-based stack limit.
+ isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
// Prepare to execute the code at entry
#if ABI_USES_FUNCTION_DESCRIPTORS
// entry is the function descriptor
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index c92281682f..042b2ada2c 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -199,12 +199,12 @@ class Simulator {
void set_pc(intptr_t value);
intptr_t get_pc() const;
- Address get_sp() {
+ Address get_sp() const {
return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
}
// Accessor to the internal simulator stack area.
- uintptr_t StackLimit() const;
+ uintptr_t StackLimit(uintptr_t c_limit) const;
// Executes PPC instructions until the PC reaches end_sim_pc.
void Execute();
@@ -253,6 +253,8 @@ class Simulator {
end_sim_pc = -2
};
+ enum BCType { BC_OFFSET, BC_LINK_REG, BC_CTR_REG };
+
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
@@ -302,7 +304,7 @@ class Simulator {
void Trace(Instruction* instr);
void SetCR0(intptr_t result, bool setSO = false);
- void ExecuteBranchConditional(Instruction* instr);
+ void ExecuteBranchConditional(Instruction* instr, BCType type);
void ExecuteExt1(Instruction* instr);
bool ExecuteExt2_10bit(Instruction* instr);
bool ExecuteExt2_9bit_part1(Instruction* instr);
@@ -401,15 +403,14 @@ class Simulator {
// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. Setting the c_limit to indicate a very small
-// stack cause stack overflow errors, since the simulator ignores the input.
-// This is unlikely to be an issue in practice, though it might cause testing
-// trouble down the line.
+// the C-based native code. The JS-based limit normally points near the end of
+// the simulator stack. When the C-based limit is exhausted we reflect that by
+// lowering the JS-based limit as well, to make stack checks trigger.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit();
+ return Simulator::current(isolate)->StackLimit(c_limit);
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index b1852bf978..b1541f616a 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -91,12 +91,12 @@ PreParserExpression PreParserTraits::ParseV8Intrinsic(bool* ok) {
PreParserExpression PreParserTraits::ParseFunctionLiteral(
PreParserIdentifier name, Scanner::Location function_name_location,
- bool name_is_strict_reserved, FunctionKind kind,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok) {
return pre_parser_->ParseFunctionLiteral(
- name, function_name_location, name_is_strict_reserved, kind,
+ name, function_name_location, function_name_validity, kind,
function_token_position, type, arity_restriction, language_mode, ok);
}
@@ -198,7 +198,7 @@ PreParser::Statement PreParser::ParseStatementListItem(bool* ok) {
}
break;
case Token::LET:
- if (is_strict(language_mode())) {
+ if (allow_let()) {
return ParseVariableStatement(kStatementListItem, ok);
}
break;
@@ -253,10 +253,10 @@ void PreParser::ParseStatementList(int end_token, bool* ok,
if (directive_prologue) {
if (statement.IsUseStrictLiteral()) {
scope_->SetLanguageMode(
- static_cast<LanguageMode>(scope_->language_mode() | STRICT_BIT));
+ static_cast<LanguageMode>(scope_->language_mode() | STRICT));
} else if (statement.IsUseStrongLiteral() && allow_strong_mode()) {
scope_->SetLanguageMode(static_cast<LanguageMode>(
- scope_->language_mode() | STRICT_BIT | STRONG_BIT));
+ scope_->language_mode() | STRONG));
} else if (!statement.IsStringLiteral()) {
directive_prologue = false;
}
@@ -420,7 +420,9 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
bool is_strict_reserved = false;
Identifier name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
- ParseFunctionLiteral(name, scanner()->location(), is_strict_reserved,
+ ParseFunctionLiteral(name, scanner()->location(),
+ is_strict_reserved ? kFunctionNameIsStrictReserved
+ : kFunctionNameValidityUnknown,
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
pos, FunctionLiteral::DECLARATION,
@@ -458,7 +460,7 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
Expect(Token::LBRACE, CHECK_OK);
Statement final = Statement::Default();
while (peek() != Token::RBRACE) {
- if (is_strict(language_mode())) {
+ if (is_strict(language_mode()) || allow_harmony_sloppy()) {
final = ParseStatementListItem(CHECK_OK);
} else {
final = ParseStatement(CHECK_OK);
@@ -526,12 +528,13 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// existing pages. Therefore we keep allowing const with the old
// non-harmony semantics in sloppy mode.
Consume(Token::CONST);
- if (is_strict(language_mode())) {
+ if (is_strict(language_mode()) ||
+ (allow_harmony_sloppy() && !allow_legacy_const())) {
DCHECK(var_context != kStatement);
is_strict_const = true;
require_initializer = var_context != kForStatement;
}
- } else if (peek() == Token::LET && is_strict(language_mode())) {
+ } else if (peek() == Token::LET && allow_let()) {
Consume(Token::LET);
DCHECK(var_context != kStatement);
} else {
@@ -871,7 +874,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
if (peek() != Token::SEMICOLON) {
ForEachStatement::VisitMode mode;
if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
- (peek() == Token::LET && is_strict(language_mode()))) {
+ (peek() == Token::LET && allow_let())) {
int decl_count;
Scanner::Location first_initializer_loc = Scanner::Location::invalid();
Scanner::Location bindings_loc = Scanner::Location::invalid();
@@ -910,11 +913,16 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
return Statement::Default();
}
} else {
+ int lhs_beg_pos = peek_position();
Expression lhs = ParseExpression(false, CHECK_OK);
+ int lhs_end_pos = scanner()->location().end_pos;
is_let_identifier_expression =
lhs.IsIdentifier() && lhs.AsIdentifier().IsLet();
if (CheckInOrOf(lhs.IsIdentifier(), &mode, ok)) {
if (!*ok) return Statement::Default();
+ lhs = CheckAndRewriteReferenceExpression(
+ lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
+ kSyntaxError, CHECK_OK);
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
ParseSubStatement(CHECK_OK);
@@ -1029,8 +1037,8 @@ PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
PreParser::Expression PreParser::ParseFunctionLiteral(
Identifier function_name, Scanner::Location function_name_location,
- bool name_is_strict_reserved, FunctionKind kind, int function_token_pos,
- FunctionLiteral::FunctionType function_type,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
+ int function_token_pos, FunctionLiteral::FunctionType function_type,
FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok) {
// Function ::
@@ -1049,14 +1057,13 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
Expect(Token::LPAREN, CHECK_OK);
int start_position = scanner()->location().beg_pos;
function_scope->set_start_position(start_position);
- PreParserFormalParameterParsingState parsing_state(nullptr);
- int num_parameters =
- ParseFormalParameterList(&parsing_state, &formals_classifier, CHECK_OK);
+ PreParserFormalParameters formals(nullptr);
+ ParseFormalParameterList(&formals, &formals_classifier, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
int formals_end_position = scanner()->location().end_pos;
- CheckArityRestrictions(num_parameters, arity_restriction,
- parsing_state.has_rest, start_position,
+ CheckArityRestrictions(formals.arity, arity_restriction,
+ formals.has_rest, start_position,
formals_end_position, CHECK_OK);
// See Parser::ParseFunctionLiteral for more information about lazy parsing
@@ -1078,12 +1085,10 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// Validate name and parameter names. We can do this only after parsing the
// function, since the function can declare itself strict.
- CheckFunctionName(language_mode, kind, function_name,
- name_is_strict_reserved, function_name_location, CHECK_OK);
- const bool strict_formal_parameters =
- !parsing_state.is_simple_parameter_list || IsConciseMethod(kind);
+ CheckFunctionName(language_mode, function_name, function_name_validity,
+ function_name_location, CHECK_OK);
const bool allow_duplicate_parameters =
- is_sloppy(language_mode) && !strict_formal_parameters;
+ is_sloppy(language_mode) && formals.is_simple && !IsConciseMethod(kind);
ValidateFormalParameters(&formals_classifier, language_mode,
allow_duplicate_parameters, CHECK_OK);
@@ -1148,7 +1153,7 @@ PreParserExpression PreParser::ParseClassLiteral(
Scope* scope = NewScope(scope_, BLOCK_SCOPE);
BlockState block_state(&scope_, scope);
scope_->SetLanguageMode(
- static_cast<LanguageMode>(class_language_mode | STRICT_BIT));
+ static_cast<LanguageMode>(class_language_mode | STRICT));
// TODO(marja): Make PreParser use scope names too.
// scope_->SetScopeName(name);
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index d9ef1ea31e..f8f20e530a 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -5,8 +5,6 @@
#ifndef V8_PREPARSER_H
#define V8_PREPARSER_H
-#include "src/v8.h"
-
#include "src/bailout-reason.h"
#include "src/expression-classifier.h"
#include "src/func-name-inferrer.h"
@@ -19,6 +17,23 @@
namespace v8 {
namespace internal {
+
+enum FunctionNameValidity {
+ kFunctionNameIsStrictReserved,
+ kSkipFunctionNameCheck,
+ kFunctionNameValidityUnknown
+};
+
+
+struct FormalParametersBase {
+ explicit FormalParametersBase(Scope* scope) : scope(scope) {}
+ Scope* scope;
+ bool has_rest = false;
+ bool is_simple = true;
+ int materialized_literals_count = 0;
+};
+
+
// Common base class shared between parser and pre-parser. Traits encapsulate
// the differences between Parser and PreParser:
@@ -54,6 +69,7 @@ namespace internal {
// typedef ExpressionList;
// typedef PropertyList;
// typedef FormalParameter;
+// typedef FormalParameters;
// // For constructing objects returned by the traversing functions.
// typedef Factory;
// };
@@ -67,11 +83,10 @@ class ParserBase : public Traits {
typedef typename Traits::Type::Expression ExpressionT;
typedef typename Traits::Type::Identifier IdentifierT;
typedef typename Traits::Type::FormalParameter FormalParameterT;
+ typedef typename Traits::Type::FormalParameters FormalParametersT;
typedef typename Traits::Type::FunctionLiteral FunctionLiteralT;
typedef typename Traits::Type::Literal LiteralT;
typedef typename Traits::Type::ObjectLiteralProperty ObjectLiteralPropertyT;
- typedef typename Traits::Type::FormalParameterParsingState
- FormalParameterParsingStateT;
ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
v8::Extension* extension, AstValueFactory* ast_value_factory,
@@ -93,8 +108,10 @@ class ParserBase : public Traits {
allow_natives_(false),
allow_harmony_arrow_functions_(false),
allow_harmony_sloppy_(false),
- allow_harmony_computed_property_names_(false),
- allow_harmony_rest_params_(false),
+ allow_harmony_sloppy_function_(false),
+ allow_harmony_sloppy_let_(false),
+ allow_harmony_rest_parameters_(false),
+ allow_harmony_default_parameters_(false),
allow_harmony_spreadcalls_(false),
allow_harmony_destructuring_(false),
allow_harmony_spread_arrays_(false),
@@ -110,8 +127,10 @@ class ParserBase : public Traits {
ALLOW_ACCESSORS(natives);
ALLOW_ACCESSORS(harmony_arrow_functions);
ALLOW_ACCESSORS(harmony_sloppy);
- ALLOW_ACCESSORS(harmony_computed_property_names);
- ALLOW_ACCESSORS(harmony_rest_params);
+ ALLOW_ACCESSORS(harmony_sloppy_function);
+ ALLOW_ACCESSORS(harmony_sloppy_let);
+ ALLOW_ACCESSORS(harmony_rest_parameters);
+ ALLOW_ACCESSORS(harmony_default_parameters);
ALLOW_ACCESSORS(harmony_spreadcalls);
ALLOW_ACCESSORS(harmony_destructuring);
ALLOW_ACCESSORS(harmony_spread_arrays);
@@ -120,12 +139,6 @@ class ParserBase : public Traits {
ALLOW_ACCESSORS(legacy_const);
#undef ALLOW_ACCESSORS
- bool allow_harmony_modules() const { return scanner()->HarmonyModules(); }
- bool allow_harmony_unicode() const { return scanner()->HarmonyUnicode(); }
-
- void set_allow_harmony_modules(bool a) { scanner()->SetHarmonyModules(a); }
- void set_allow_harmony_unicode(bool a) { scanner()->SetHarmonyUnicode(a); }
-
protected:
enum AllowRestrictedIdentifiers {
kAllowRestrictedIdentifiers,
@@ -303,7 +316,7 @@ class ParserBase : public Traits {
Scope* NewScope(Scope* parent, ScopeType scope_type, FunctionKind kind) {
DCHECK(ast_value_factory());
- DCHECK(scope_type != MODULE_SCOPE || allow_harmony_modules());
+ DCHECK(scope_type != MODULE_SCOPE || FLAG_harmony_modules);
DCHECK(!IsArrowFunction(kind) || scope_type == ARROW_SCOPE);
Scope* result = new (zone())
Scope(zone(), parent, scope_type, ast_value_factory(), kind);
@@ -449,13 +462,10 @@ class ParserBase : public Traits {
// Checking the name of a function literal. This has to be done after parsing
// the function, since the function can declare itself strict.
- void CheckFunctionName(LanguageMode language_mode, FunctionKind kind,
- IdentifierT function_name,
- bool function_name_is_strict_reserved,
- const Scanner::Location& function_name_loc,
- bool* ok) {
- // Property names are never checked.
- if (IsConciseMethod(kind) || IsAccessorFunction(kind)) return;
+ void CheckFunctionName(LanguageMode language_mode, IdentifierT function_name,
+ FunctionNameValidity function_name_validity,
+ const Scanner::Location& function_name_loc, bool* ok) {
+ if (function_name_validity == kSkipFunctionNameCheck) return;
// The function name needs to be checked in strict mode.
if (is_sloppy(language_mode)) return;
@@ -465,7 +475,7 @@ class ParserBase : public Traits {
*ok = false;
return;
}
- if (function_name_is_strict_reserved) {
+ if (function_name_validity == kFunctionNameIsStrictReserved) {
Traits::ReportMessageAt(function_name_loc,
MessageTemplate::kUnexpectedStrictReserved);
*ok = false;
@@ -494,7 +504,12 @@ class ParserBase : public Traits {
bool is_generator() const { return function_state_->is_generator(); }
bool allow_const() {
- return is_strict(language_mode()) || allow_legacy_const();
+ return is_strict(language_mode()) || allow_harmony_sloppy() ||
+ allow_legacy_const();
+ }
+
+ bool allow_let() {
+ return is_strict(language_mode()) || allow_harmony_sloppy_let();
}
// Report syntax errors.
@@ -674,7 +689,7 @@ class ParserBase : public Traits {
ExpressionT ParseMemberExpressionContinuation(
ExpressionT expression, ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseArrowFunctionLiteral(
- const FormalParameterParsingStateT& parsing_state,
+ const FormalParametersT& parameters,
const ExpressionClassifier& classifier, bool* ok);
ExpressionT ParseTemplateLiteral(ExpressionT tag, int start,
ExpressionClassifier* classifier, bool* ok);
@@ -687,11 +702,10 @@ class ParserBase : public Traits {
ExpressionT ParseStrongSuperCallExpression(ExpressionClassifier* classifier,
bool* ok);
- void ParseFormalParameter(bool is_rest,
- FormalParameterParsingStateT* parsing_result,
+ void ParseFormalParameter(FormalParametersT* parameters,
ExpressionClassifier* classifier, bool* ok);
- int ParseFormalParameterList(FormalParameterParsingStateT* parsing_state,
- ExpressionClassifier* classifier, bool* ok);
+ void ParseFormalParameterList(FormalParametersT* parameters,
+ ExpressionClassifier* classifier, bool* ok);
void CheckArityRestrictions(
int param_count, FunctionLiteral::ArityRestriction arity_restriction,
bool has_rest, int formals_start_pos, int formals_end_pos, bool* ok);
@@ -700,8 +714,11 @@ class ParserBase : public Traits {
// left-hand side of assignments). Although ruled out by ECMA as early errors,
// we allow calls for web compatibility and rewrite them to a runtime throw.
ExpressionT CheckAndRewriteReferenceExpression(
- ExpressionT expression, Scanner::Location location,
+ ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, bool* ok);
+ ExpressionT CheckAndRewriteReferenceExpression(
+ ExpressionT expression, int beg_pos, int end_pos,
+ MessageTemplate::Template message, ParseErrorType type, bool* ok);
// Used to validate property names in object literals and class literals
enum PropertyKind {
@@ -787,8 +804,10 @@ class ParserBase : public Traits {
bool allow_natives_;
bool allow_harmony_arrow_functions_;
bool allow_harmony_sloppy_;
- bool allow_harmony_computed_property_names_;
- bool allow_harmony_rest_params_;
+ bool allow_harmony_sloppy_function_;
+ bool allow_harmony_sloppy_let_;
+ bool allow_harmony_rest_parameters_;
+ bool allow_harmony_default_parameters_;
bool allow_harmony_spreadcalls_;
bool allow_harmony_destructuring_;
bool allow_harmony_spread_arrays_;
@@ -937,10 +956,16 @@ class PreParserExpression {
ExpressionTypeField::encode(kCallExpression));
}
+ static PreParserExpression SuperCallReference() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kSuperCallReference));
+ }
+
static PreParserExpression NoTemplateTag() {
- return PreParserExpression(TypeField::encode(kExpression) |
- ExpressionTypeField::encode(
- kNoTemplateTagExpression));
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kNoTemplateTagExpression));
}
bool IsIdentifier() const {
@@ -987,6 +1012,11 @@ class PreParserExpression {
ExpressionTypeField::decode(code_) == kCallExpression;
}
+ bool IsSuperCallReference() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kSuperCallReference;
+ }
+
bool IsValidReferenceExpression() const {
return IsIdentifier() || IsProperty();
}
@@ -1035,6 +1065,7 @@ class PreParserExpression {
kThisPropertyExpression,
kPropertyExpression,
kCallExpression,
+ kSuperCallReference,
kNoTemplateTagExpression
};
@@ -1165,6 +1196,11 @@ class PreParserFactory {
int pos) {
return PreParserExpression::Default();
}
+ PreParserExpression NewArrayLiteral(PreParserExpressionList values,
+ int first_spread_index, int literal_index,
+ bool is_strong, int pos) {
+ return PreParserExpression::Default();
+ }
PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
PreParserExpression value,
ObjectLiteralProperty::Kind kind,
@@ -1282,16 +1318,13 @@ class PreParserFactory {
};
-struct PreParserFormalParameterParsingState {
- explicit PreParserFormalParameterParsingState(Scope* scope)
- : scope(scope),
- has_rest(false),
- is_simple_parameter_list(true),
- materialized_literals_count(0) {}
- Scope* scope;
- bool has_rest;
- bool is_simple_parameter_list;
- int materialized_literals_count;
+struct PreParserFormalParameters : FormalParametersBase {
+ explicit PreParserFormalParameters(Scope* scope)
+ : FormalParametersBase(scope) {}
+ int arity = 0;
+
+ int Arity() const { return arity; }
+ PreParserIdentifier at(int i) { return PreParserIdentifier(); } // Dummy
};
@@ -1320,8 +1353,8 @@ class PreParserTraits {
typedef PreParserExpressionList ExpressionList;
typedef PreParserExpressionList PropertyList;
typedef PreParserIdentifier FormalParameter;
+ typedef PreParserFormalParameters FormalParameters;
typedef PreParserStatementList StatementList;
- typedef PreParserFormalParameterParsingState FormalParameterParsingState;
// For constructing objects returned by the traversing functions.
typedef PreParserFactory Factory;
@@ -1513,7 +1546,7 @@ class PreParserTraits {
static PreParserExpression SuperCallReference(Scope* scope,
PreParserFactory* factory,
int pos) {
- return PreParserExpression::Default();
+ return PreParserExpression::SuperCallReference();
}
static PreParserExpression NewTargetExpression(Scope* scope,
@@ -1561,7 +1594,7 @@ class PreParserTraits {
}
static void AddParameterInitializationBlock(
- const PreParserFormalParameterParsingState& formal_parameters,
+ const PreParserFormalParameters& parameters,
PreParserStatementList list, bool* ok) {}
V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
@@ -1571,16 +1604,15 @@ class PreParserTraits {
V8_INLINE PreParserStatementList ParseEagerFunctionBody(
PreParserIdentifier function_name, int pos,
- const PreParserFormalParameterParsingState& formal_parameters,
- Variable* fvar, Token::Value fvar_init_op, FunctionKind kind, bool* ok);
+ const PreParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok);
- V8_INLINE void ParseArrowFunctionFormalParameters(
- PreParserFormalParameterParsingState* parsing_state,
+ V8_INLINE void ParseArrowFunctionFormalParameterList(
+ PreParserFormalParameters* parameters,
PreParserExpression expression, const Scanner::Location& params_loc,
Scanner::Location* duplicate_loc, bool* ok);
- void ReindexLiterals(
- const PreParserFormalParameterParsingState& parsing_state) {}
+ void ReindexLiterals(const PreParserFormalParameters& parameters) {}
struct TemplateLiteralState {};
@@ -1606,8 +1638,14 @@ class PreParserTraits {
return !tag.IsNoTemplateTag();
}
- void DeclareFormalParameter(void* parsing_state, PreParserExpression pattern,
- ExpressionClassifier* classifier, bool is_rest) {}
+ void AddFormalParameter(
+ PreParserFormalParameters* parameters, PreParserExpression pattern,
+ PreParserExpression initializer, bool is_rest) {
+ ++parameters->arity;
+ }
+ void DeclareFormalParameter(Scope* scope, PreParserIdentifier parameter,
+ bool is_simple,
+ ExpressionClassifier* classifier) {}
void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
@@ -1615,7 +1653,7 @@ class PreParserTraits {
PreParserExpression ParseV8Intrinsic(bool* ok);
PreParserExpression ParseFunctionLiteral(
PreParserIdentifier name, Scanner::Location function_name_location,
- bool name_is_strict_reserved, FunctionKind kind,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok);
@@ -1756,16 +1794,15 @@ class PreParser : public ParserBase<PreParserTraits> {
V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
int* expected_property_count, bool* ok);
- V8_INLINE PreParserStatementList
- ParseEagerFunctionBody(PreParserIdentifier function_name, int pos,
- const FormalParameterParsingStateT& formal_parameters,
- Variable* fvar, Token::Value fvar_init_op,
- FunctionKind kind, bool* ok);
+ V8_INLINE PreParserStatementList ParseEagerFunctionBody(
+ PreParserIdentifier function_name, int pos,
+ const PreParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok);
Expression ParseFunctionLiteral(
Identifier name, Scanner::Location function_name_location,
- bool name_is_strict_reserved, FunctionKind kind, int function_token_pos,
- FunctionLiteral::FunctionType function_type,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
+ int function_token_pos, FunctionLiteral::FunctionType function_type,
FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok);
void ParseLazyFunctionLiteralBody(bool* ok,
@@ -1804,8 +1841,8 @@ PreParserExpression PreParserTraits::SpreadCallNew(PreParserExpression function,
}
-void PreParserTraits::ParseArrowFunctionFormalParameters(
- PreParserFormalParameterParsingState* parsing_state,
+void PreParserTraits::ParseArrowFunctionFormalParameterList(
+ PreParserFormalParameters* parameters,
PreParserExpression params, const Scanner::Location& params_loc,
Scanner::Location* duplicate_loc, bool* ok) {
// TODO(wingo): Detect duplicated identifiers in paramlists. Detect parameter
@@ -1815,8 +1852,8 @@ void PreParserTraits::ParseArrowFunctionFormalParameters(
PreParserStatementList PreParser::ParseEagerFunctionBody(
PreParserIdentifier function_name, int pos,
- const PreParserFormalParameterParsingState& formal_parameters,
- Variable* fvar, Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
+ const PreParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok) {
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
ParseStatementList(Token::RBRACE, ok);
@@ -1829,10 +1866,10 @@ PreParserStatementList PreParser::ParseEagerFunctionBody(
PreParserStatementList PreParserTraits::ParseEagerFunctionBody(
PreParserIdentifier function_name, int pos,
- const PreParserFormalParameterParsingState& formal_parameters,
- Variable* fvar, Token::Value fvar_init_op, FunctionKind kind, bool* ok) {
- return pre_parser_->ParseEagerFunctionBody(
- function_name, pos, formal_parameters, fvar, fvar_init_op, kind, ok);
+ const PreParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok) {
+ return pre_parser_->ParseEagerFunctionBody(function_name, pos, parameters,
+ kind, function_type, ok);
}
@@ -2162,7 +2199,7 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
case Token::SMI:
case Token::NUMBER:
classifier->RecordBindingPatternError(
- scanner()->location(), MessageTemplate::kUnexpectedTokenNumber);
+ scanner()->peek_location(), MessageTemplate::kUnexpectedTokenNumber);
Next();
result =
this->ExpressionFromLiteral(token, beg_pos, scanner(), factory());
@@ -2182,17 +2219,21 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
case Token::STRING: {
classifier->RecordBindingPatternError(
- scanner()->location(), MessageTemplate::kUnexpectedTokenString);
+ scanner()->peek_location(), MessageTemplate::kUnexpectedTokenString);
Consume(Token::STRING);
result = this->ExpressionFromString(beg_pos, scanner(), factory());
break;
}
case Token::ASSIGN_DIV:
+ classifier->RecordBindingPatternError(
+ scanner()->peek_location(), MessageTemplate::kUnexpectedTokenRegExp);
result = this->ParseRegExpLiteral(true, classifier, CHECK_OK);
break;
case Token::DIV:
+ classifier->RecordBindingPatternError(
+ scanner()->peek_location(), MessageTemplate::kUnexpectedTokenRegExp);
result = this->ParseRegExpLiteral(false, classifier, CHECK_OK);
break;
@@ -2235,22 +2276,24 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
}
Scope* scope =
this->NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
- FormalParameterParsingStateT parsing_state(scope);
+ FormalParametersT parameters(scope);
scope->set_start_position(beg_pos);
ExpressionClassifier args_classifier;
- result = this->ParseArrowFunctionLiteral(parsing_state, args_classifier,
+ result = this->ParseArrowFunctionLiteral(parameters, args_classifier,
CHECK_OK);
} else if (allow_harmony_arrow_functions() &&
- allow_harmony_rest_params() && Check(Token::ELLIPSIS)) {
+ allow_harmony_rest_parameters() && Check(Token::ELLIPSIS)) {
// (...x) => y
Scope* scope =
this->NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
- FormalParameterParsingStateT parsing_state(scope);
+ FormalParametersT formals(scope);
scope->set_start_position(beg_pos);
- ExpressionClassifier args_classifier;
- const bool is_rest = true;
- this->ParseFormalParameter(is_rest, &parsing_state, &args_classifier,
- CHECK_OK);
+ ExpressionClassifier formals_classifier;
+ formals.has_rest = true;
+ this->ParseFormalParameter(&formals, &formals_classifier, CHECK_OK);
+ Traits::DeclareFormalParameter(
+ formals.scope, formals.at(0), formals.is_simple,
+ &formals_classifier);
if (peek() == Token::COMMA) {
ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kParamAfterRest);
@@ -2258,7 +2301,7 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
return this->EmptyExpression();
}
Expect(Token::RPAREN, CHECK_OK);
- result = this->ParseArrowFunctionLiteral(parsing_state, args_classifier,
+ result = this->ParseArrowFunctionLiteral(formals, formals_classifier,
CHECK_OK);
} else {
// Heuristically try to detect immediately called functions before
@@ -2294,6 +2337,9 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL:
+ classifier->RecordBindingPatternError(
+ scanner()->peek_location(),
+ MessageTemplate::kUnexpectedTemplateString);
result = this->ParseTemplateLiteral(Traits::NoTemplateTag(), beg_pos,
classifier, CHECK_OK);
break;
@@ -2350,7 +2396,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
}
Consume(Token::COMMA);
bool is_rest = false;
- if (allow_harmony_rest_params() && peek() == Token::ELLIPSIS) {
+ if (allow_harmony_rest_parameters() && peek() == Token::ELLIPSIS) {
// 'x, y, ...z' in CoverParenthesizedExpressionAndArrowParameterList only
// as the formal parameters of'(x, y, ...z) => foo', and is not itself a
// valid expression or binding pattern.
@@ -2380,6 +2426,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
int pos = peek_position();
typename Traits::Type::ExpressionList values =
this->NewExpressionList(4, zone_);
+ int first_spread_index = -1;
Expect(Token::LBRACK, CHECK_OK);
while (peek() != Token::RBRACK) {
bool seen_spread = false;
@@ -2402,6 +2449,9 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
this->ParseAssignmentExpression(true, classifier, CHECK_OK);
elem = factory()->NewSpread(argument, start_pos);
seen_spread = true;
+ if (first_spread_index < 0) {
+ first_spread_index = values->length();
+ }
} else {
elem = this->ParseAssignmentExpression(true, classifier, CHECK_OK);
}
@@ -2418,7 +2468,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
// Update the scope information before the pre-parsing bailout.
int literal_index = function_state_->NextMaterializedLiteralIndex();
- return factory()->NewArrayLiteral(values, literal_index,
+ return factory()->NewArrayLiteral(values, first_spread_index, literal_index,
is_strong(language_mode()), pos);
}
@@ -2455,19 +2505,18 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
*name = this->GetNumberAsSymbol(scanner());
break;
- case Token::LBRACK:
- if (allow_harmony_computed_property_names_) {
- *is_computed_name = true;
- Consume(Token::LBRACK);
- ExpressionClassifier computed_name_classifier;
- ExpressionT expression = ParseAssignmentExpression(
- true, &computed_name_classifier, CHECK_OK);
- classifier->AccumulateReclassifyingAsPattern(computed_name_classifier);
- Expect(Token::RBRACK, CHECK_OK);
- return expression;
- }
+ case Token::LBRACK: {
+ *is_computed_name = true;
+ Consume(Token::LBRACK);
+ ExpressionClassifier computed_name_classifier;
+ ExpressionT expression =
+ ParseAssignmentExpression(true, &computed_name_classifier, CHECK_OK);
+ classifier->Accumulate(computed_name_classifier,
+ ExpressionClassifier::ExpressionProduction);
+ Expect(Token::RBRACK, CHECK_OK);
+ return expression;
+ }
- // Fall through.
case Token::STATIC:
*is_static = true;
@@ -2540,9 +2589,8 @@ ParserBase<Traits>::ParsePropertyDefinition(
if (!in_class) kind = WithObjectLiteralBit(kind);
value = this->ParseFunctionLiteral(
- name, scanner()->location(),
- false, // reserved words are allowed here
- kind, RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
+ name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
FunctionLiteral::NORMAL_ARITY, language_mode(),
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
@@ -2573,9 +2621,8 @@ ParserBase<Traits>::ParsePropertyDefinition(
FunctionKind kind = FunctionKind::kAccessorFunction;
if (!in_class) kind = WithObjectLiteralBit(kind);
typename Traits::Type::FunctionLiteral value = this->ParseFunctionLiteral(
- name, scanner()->location(),
- false, // reserved words are allowed here
- kind, RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
+ name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
is_get ? FunctionLiteral::GETTER_ARITY : FunctionLiteral::SETTER_ARITY,
language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
@@ -2610,7 +2657,8 @@ ParserBase<Traits>::ParsePropertyDefinition(
ExpressionClassifier rhs_classifier;
ExpressionT rhs = this->ParseAssignmentExpression(
true, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- classifier->AccumulateReclassifyingAsPattern(rhs_classifier);
+ classifier->Accumulate(rhs_classifier,
+ ExpressionClassifier::ExpressionProduction);
value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
RelocInfo::kNoPosition);
} else {
@@ -2777,7 +2825,7 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
// YieldExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
- Scanner::Location lhs_location = scanner()->peek_location();
+ int lhs_beg_pos = peek_position();
if (peek() == Token::YIELD && is_generator()) {
return this->ParseYieldExpression(classifier, ok);
@@ -2797,22 +2845,22 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
BindingPatternUnexpectedToken(classifier);
ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
parenthesized_formals, CHECK_OK);
- Scanner::Location loc(lhs_location.beg_pos, scanner()->location().end_pos);
+ Scanner::Location loc(lhs_beg_pos, scanner()->location().end_pos);
Scope* scope =
this->NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
- FormalParameterParsingStateT parsing_state(scope);
- checkpoint.Restore(&parsing_state.materialized_literals_count);
+ FormalParametersT parameters(scope);
+ checkpoint.Restore(&parameters.materialized_literals_count);
- scope->set_start_position(lhs_location.beg_pos);
+ scope->set_start_position(lhs_beg_pos);
Scanner::Location duplicate_loc = Scanner::Location::invalid();
- this->ParseArrowFunctionFormalParameters(&parsing_state, expression, loc,
- &duplicate_loc, CHECK_OK);
+ this->ParseArrowFunctionFormalParameterList(&parameters, expression, loc,
+ &duplicate_loc, CHECK_OK);
if (duplicate_loc.IsValid()) {
arrow_formals_classifier.RecordDuplicateFormalParameterError(
duplicate_loc);
}
expression = this->ParseArrowFunctionLiteral(
- parsing_state, arrow_formals_classifier, CHECK_OK);
+ parameters, arrow_formals_classifier, CHECK_OK);
return expression;
}
@@ -2828,13 +2876,13 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
return expression;
}
- if (!allow_harmony_destructuring()) {
+ if (!(allow_harmony_destructuring() || allow_harmony_default_parameters())) {
BindingPatternUnexpectedToken(classifier);
}
expression = this->CheckAndRewriteReferenceExpression(
- expression, lhs_location, MessageTemplate::kInvalidLhsInAssignment,
- CHECK_OK);
+ expression, lhs_beg_pos, scanner()->location().end_pos,
+ MessageTemplate::kInvalidLhsInAssignment, CHECK_OK);
expression = this->MarkExpressionAsAssigned(expression);
Token::Value op = Next(); // Get assignment operator.
@@ -2848,7 +2896,8 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
ExpressionClassifier rhs_classifier;
ExpressionT right =
this->ParseAssignmentExpression(accept_IN, &rhs_classifier, CHECK_OK);
- classifier->AccumulateReclassifyingAsPattern(rhs_classifier);
+ classifier->Accumulate(rhs_classifier,
+ ExpressionClassifier::ExpressionProduction);
// TODO(1231235): We try to estimate the set of properties set by
// constructors. We define a new property whenever there is an
@@ -3053,11 +3102,11 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
op = Next();
- Scanner::Location lhs_location = scanner()->peek_location();
+ int beg_pos = peek_position();
ExpressionT expression = this->ParseUnaryExpression(classifier, CHECK_OK);
expression = this->CheckAndRewriteReferenceExpression(
- expression, lhs_location, MessageTemplate::kInvalidLhsInPrefixOp,
- CHECK_OK);
+ expression, beg_pos, scanner()->location().end_pos,
+ MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
this->MarkExpressionAsAssigned(expression);
return factory()->NewCountOperation(op,
@@ -3078,7 +3127,7 @@ ParserBase<Traits>::ParsePostfixExpression(ExpressionClassifier* classifier,
// PostfixExpression ::
// LeftHandSideExpression ('++' | '--')?
- Scanner::Location lhs_location = scanner()->peek_location();
+ int lhs_beg_pos = peek_position();
ExpressionT expression =
this->ParseLeftHandSideExpression(classifier, CHECK_OK);
if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
@@ -3087,8 +3136,8 @@ ParserBase<Traits>::ParsePostfixExpression(ExpressionClassifier* classifier,
ArrowFormalParametersUnexpectedToken(classifier);
expression = this->CheckAndRewriteReferenceExpression(
- expression, lhs_location, MessageTemplate::kInvalidLhsInPostfixOp,
- CHECK_OK);
+ expression, lhs_beg_pos, scanner()->location().end_pos,
+ MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
expression = this->MarkExpressionAsAssigned(expression);
Token::Value next = Next();
@@ -3167,12 +3216,22 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
// they are actually direct calls to eval is determined at run time.
this->CheckPossibleEvalCall(result, scope_);
+ bool is_super_call = result->IsSuperCallReference();
if (spread_pos.IsValid()) {
args = Traits::PrepareSpreadArguments(args);
result = Traits::SpreadCall(result, args, pos);
} else {
result = factory()->NewCall(result, args, pos);
}
+
+ // Explicit calls to the super constructor using super() perform an
+ // implicit binding assignment to the 'this' variable.
+ if (is_super_call) {
+ ExpressionT this_expr = this->ThisExpression(scope_, factory(), pos);
+ result = factory()->NewAssignment(Token::INIT_CONST, this_expr,
+ result, pos);
+ }
+
if (fni_ != NULL) fni_->RemoveLastFunction();
break;
}
@@ -3301,7 +3360,9 @@ ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
function_type = FunctionLiteral::NAMED_EXPRESSION;
}
result = this->ParseFunctionLiteral(
- name, function_name_location, is_strict_reserved_name,
+ name, function_name_location,
+ is_strict_reserved_name ? kFunctionNameIsStrictReserved
+ : kFunctionNameValidityUnknown,
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
function_token_position, function_type, FunctionLiteral::NORMAL_ARITY,
@@ -3447,10 +3508,15 @@ ParserBase<Traits>::ParseStrongSuperCallExpression(
function_state_->set_super_location(super_loc);
if (spread_pos.IsValid()) {
args = Traits::PrepareSpreadArguments(args);
- return Traits::SpreadCall(expr, args, pos);
+ expr = Traits::SpreadCall(expr, args, pos);
} else {
- return factory()->NewCall(expr, args, pos);
+ expr = factory()->NewCall(expr, args, pos);
}
+
+ // Explicit calls to the super constructor using super() perform an implicit
+ // binding assignment to the 'this' variable.
+ ExpressionT this_expr = this->ThisExpression(scope_, factory(), pos);
+ return factory()->NewAssignment(Token::INIT_CONST, this_expr, expr, pos);
}
@@ -3462,13 +3528,7 @@ ParserBase<Traits>::ParseSuperExpression(bool is_new,
int pos = position();
Expect(Token::SUPER, CHECK_OK);
- Scope* scope = scope_->DeclarationScope();
- while (scope->is_eval_scope() || scope->is_arrow_scope()) {
- scope = scope->outer_scope();
- DCHECK_NOT_NULL(scope);
- scope = scope->DeclarationScope();
- }
-
+ Scope* scope = scope_->ReceiverScope();
FunctionKind kind = scope->function_kind();
if (IsConciseMethod(kind) || IsAccessorFunction(kind) ||
i::IsConstructor(kind)) {
@@ -3506,14 +3566,7 @@ ParserBase<Traits>::ParseNewTargetExpression(bool* ok) {
Consume(Token::PERIOD);
ExpectContextualKeyword(CStrVector("target"), CHECK_OK);
- Scope* scope = scope_->DeclarationScope();
- while (scope->is_eval_scope() || scope->is_arrow_scope()) {
- scope = scope->outer_scope();
- DCHECK_NOT_NULL(scope);
- scope = scope->DeclarationScope();
- }
-
- if (!scope->is_function_scope()) {
+ if (!scope_->ReceiverScope()->is_function_scope()) {
ReportMessageAt(scanner()->location(),
MessageTemplate::kUnexpectedNewTarget);
*ok = false;
@@ -3590,10 +3643,10 @@ ParserBase<Traits>::ParseMemberExpressionContinuation(
template <class Traits>
void ParserBase<Traits>::ParseFormalParameter(
- bool is_rest, FormalParameterParsingStateT* parsing_state,
- ExpressionClassifier* classifier, bool* ok) {
+ FormalParametersT* parameters, ExpressionClassifier* classifier, bool* ok) {
// FormalParameter[Yield,GeneratorParameter] :
// BindingElement[?Yield, ?GeneratorParameter]
+ bool is_rest = parameters->has_rest;
Token::Value next = peek();
ExpressionT pattern = ParsePrimaryExpression(classifier, ok);
@@ -3602,30 +3655,32 @@ void ParserBase<Traits>::ParseFormalParameter(
ValidateBindingPattern(classifier, ok);
if (!*ok) return;
- if (!allow_harmony_destructuring() && !Traits::IsIdentifier(pattern)) {
- ReportUnexpectedToken(next);
- *ok = false;
- return;
+ if (!Traits::IsIdentifier(pattern)) {
+ if (is_rest || !allow_harmony_destructuring()) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return;
+ }
+ parameters->is_simple = false;
}
- if (parsing_state->is_simple_parameter_list) {
- parsing_state->is_simple_parameter_list =
- !is_rest && Traits::IsIdentifier(pattern);
- }
- parsing_state->has_rest = is_rest;
- if (is_rest && !Traits::IsIdentifier(pattern)) {
- ReportUnexpectedToken(next);
- *ok = false;
- return;
+ ExpressionT initializer = Traits::EmptyExpression();
+ if (!is_rest && allow_harmony_default_parameters() && Check(Token::ASSIGN)) {
+ ExpressionClassifier init_classifier;
+ initializer = ParseAssignmentExpression(true, &init_classifier, ok);
+ if (!*ok) return;
+ ValidateExpression(&init_classifier, ok);
+ if (!*ok) return;
+ parameters->is_simple = false;
}
- Traits::DeclareFormalParameter(parsing_state, pattern, classifier, is_rest);
+
+ Traits::AddFormalParameter(parameters, pattern, initializer, is_rest);
}
template <class Traits>
-int ParserBase<Traits>::ParseFormalParameterList(
- FormalParameterParsingStateT* parsing_state,
- ExpressionClassifier* classifier, bool* ok) {
+void ParserBase<Traits>::ParseFormalParameterList(
+ FormalParametersT* parameters, ExpressionClassifier* classifier, bool* ok) {
// FormalParameters[Yield,GeneratorParameter] :
// [empty]
// FormalParameterList[?Yield, ?GeneratorParameter]
@@ -3640,29 +3695,37 @@ int ParserBase<Traits>::ParseFormalParameterList(
// FormalsList[?Yield, ?GeneratorParameter] ,
// FormalParameter[?Yield,?GeneratorParameter]
- int parameter_count = 0;
+ DCHECK_EQ(0, parameters->Arity());
if (peek() != Token::RPAREN) {
do {
- if (++parameter_count > Code::kMaxArguments) {
+ if (parameters->Arity() > Code::kMaxArguments) {
ReportMessage(MessageTemplate::kTooManyParameters);
*ok = false;
- return -1;
+ return;
}
- bool is_rest = allow_harmony_rest_params() && Check(Token::ELLIPSIS);
- ParseFormalParameter(is_rest, parsing_state, classifier, ok);
- if (!*ok) return -1;
- } while (!parsing_state->has_rest && Check(Token::COMMA));
-
- if (parsing_state->has_rest && peek() == Token::COMMA) {
- ReportMessageAt(scanner()->peek_location(),
+ parameters->has_rest =
+ allow_harmony_rest_parameters() && Check(Token::ELLIPSIS);
+ ParseFormalParameter(parameters, classifier, ok);
+ if (!*ok) return;
+ } while (!parameters->has_rest && Check(Token::COMMA));
+
+ if (parameters->has_rest) {
+ parameters->is_simple = false;
+ if (peek() == Token::COMMA) {
+ ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kParamAfterRest);
- *ok = false;
- return -1;
+ *ok = false;
+ return;
+ }
}
}
- return parameter_count;
+ for (int i = 0; i < parameters->Arity(); ++i) {
+ auto parameter = parameters->at(i);
+ Traits::DeclareFormalParameter(
+ parameters->scope, parameter, parameters->is_simple, classifier);
+ }
}
@@ -3699,7 +3762,7 @@ void ParserBase<Traits>::CheckArityRestrictions(
template <class Traits>
typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseArrowFunctionLiteral(
- const FormalParameterParsingStateT& formal_parameters,
+ const FormalParametersT& formal_parameters,
const ExpressionClassifier& formals_classifier, bool* ok) {
if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
// ASI inserts `;` after arrow parameters if a line terminator is found.
@@ -3741,7 +3804,7 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
} else {
body = this->ParseEagerFunctionBody(
this->EmptyIdentifier(), RelocInfo::kNoPosition, formal_parameters,
- NULL, Token::INIT_VAR, kArrowFunction, CHECK_OK);
+ kArrowFunction, FunctionLiteral::ANONYMOUS_EXPRESSION, CHECK_OK);
materialized_literal_count =
function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
@@ -3776,6 +3839,8 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
if (is_strict(language_mode())) {
CheckStrictOctalLiteral(formal_parameters.scope->start_position(),
scanner()->location().end_pos, CHECK_OK);
+ }
+ if (is_strict(language_mode()) || allow_harmony_sloppy()) {
this->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
}
}
@@ -3897,8 +3962,19 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start,
template <typename Traits>
typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::CheckAndRewriteReferenceExpression(
- ExpressionT expression, Scanner::Location location,
+ ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, bool* ok) {
+ return this->CheckAndRewriteReferenceExpression(expression, beg_pos, end_pos,
+ message, kReferenceError, ok);
+}
+
+
+template <typename Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::CheckAndRewriteReferenceExpression(
+ ExpressionT expression, int beg_pos, int end_pos,
+ MessageTemplate::Template message, ParseErrorType type, bool* ok) {
+ Scanner::Location location(beg_pos, end_pos);
if (this->IsIdentifier(expression)) {
if (is_strict(language_mode()) &&
this->IsEvalOrArguments(this->AsIdentifier(expression))) {
@@ -3924,7 +4000,7 @@ ParserBase<Traits>::CheckAndRewriteReferenceExpression(
ExpressionT error = this->NewThrowReferenceError(message, pos);
return factory()->NewProperty(expression, error, pos);
} else {
- this->ReportMessageAt(location, message, kReferenceError);
+ this->ReportMessageAt(location, message, type);
*ok = false;
return this->EmptyExpression();
}
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 6a7718a323..9bc4e6a562 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -419,6 +419,18 @@ void CallPrinter::PrintLiteral(const AstRawString* value, bool quote) {
#ifdef DEBUG
+// A helper for ast nodes that use FeedbackVectorICSlots.
+static int FormatICSlotNode(Vector<char>* buf, Expression* node,
+ const char* node_name, FeedbackVectorICSlot slot) {
+ int pos = SNPrintF(*buf, "%s", node_name);
+ if (!slot.IsInvalid()) {
+ const char* str = Code::Kind2String(node->FeedbackICSlotKind(0));
+ pos = SNPrintF(*buf + pos, " ICSlot(%d, %s)", slot.ToInt(), str);
+ }
+ return pos;
+}
+
+
PrettyPrinter::PrettyPrinter(Isolate* isolate, Zone* zone) {
output_ = NULL;
size_ = 0;
@@ -1430,11 +1442,12 @@ void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
}
-// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitVariableProxy(VariableProxy* node) {
Variable* var = node->var();
EmbeddedVector<char, 128> buf;
- int pos = SNPrintF(buf, "VAR PROXY");
+ int pos =
+ FormatICSlotNode(&buf, node, "VAR PROXY", node->VariableFeedbackSlot());
+
switch (var->location()) {
case VariableLocation::UNALLOCATED:
break;
@@ -1478,7 +1491,10 @@ void AstPrinter::VisitThrow(Throw* node) {
void AstPrinter::VisitProperty(Property* node) {
- IndentedScope indent(this, "PROPERTY");
+ EmbeddedVector<char, 128> buf;
+ FormatICSlotNode(&buf, node, "PROPERTY", node->PropertyFeedbackSlot());
+ IndentedScope indent(this, buf.start());
+
Visit(node->obj());
Literal* literal = node->key()->AsLiteral();
if (literal != NULL && literal->value()->IsInternalizedString()) {
@@ -1490,7 +1506,10 @@ void AstPrinter::VisitProperty(Property* node) {
void AstPrinter::VisitCall(Call* node) {
- IndentedScope indent(this, "CALL");
+ EmbeddedVector<char, 128> buf;
+ FormatICSlotNode(&buf, node, "CALL", node->CallFeedbackICSlot());
+ IndentedScope indent(this, buf.start());
+
Visit(node->expression());
PrintArguments(node->arguments());
}
@@ -1504,7 +1523,9 @@ void AstPrinter::VisitCallNew(CallNew* node) {
void AstPrinter::VisitCallRuntime(CallRuntime* node) {
- IndentedScope indent(this, "CALL RUNTIME");
+ EmbeddedVector<char, 128> buf;
+ FormatICSlotNode(&buf, node, "CALL RUNTIME", node->CallRuntimeFeedbackSlot());
+ IndentedScope indent(this, buf.start());
PrintLiteralIndented("NAME", node->name(), false);
PrintArguments(node->arguments());
}
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 070a9eee2b..d2e94b4758 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -7,11 +7,12 @@
#include "src/profile-generator-inl.h"
#include "src/compiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/sampler.h"
#include "src/scopeinfo.h"
+#include "src/splay-tree-inl.h"
#include "src/unicode.h"
namespace v8 {
@@ -377,6 +378,9 @@ void CpuProfile::Print() {
}
+CodeMap::~CodeMap() {}
+
+
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 1ce1940906..e1826f742e 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -266,6 +266,7 @@ class CpuProfile {
class CodeMap {
public:
CodeMap() {}
+ ~CodeMap();
void AddCode(Address addr, CodeEntry* entry, unsigned size);
void MoveCode(Address from, Address to);
CodeEntry* FindEntry(Address addr, Address* start = NULL);
diff --git a/deps/v8/src/prologue.js b/deps/v8/src/prologue.js
index e7ad29e358..4906d41f3d 100644
--- a/deps/v8/src/prologue.js
+++ b/deps/v8/src/prologue.js
@@ -12,28 +12,44 @@
// Utils
var imports = UNDEFINED;
-var exports = UNDEFINED;
var imports_from_experimental = UNDEFINED;
-
+var exports_to_runtime = UNDEFINED;
+var exports_container = {};
// Export to other scripts.
// In normal natives, this exports functions to other normal natives.
// In experimental natives, this exports to other experimental natives and
// to normal natives that import using utils.ImportFromExperimental.
function Export(f) {
- f.next = exports;
- exports = f;
-};
+ f(exports_container);
+}
+
+
+// Export to the native context for calls from the runtime.
+function ExportToRuntime(f) {
+ f.next = exports_to_runtime;
+ exports_to_runtime = f;
+}
-// Import from other scripts.
+// Import from other scripts. The actual importing happens in PostNatives and
+// PostExperimental so that we can import from scripts executed later. However,
+// that means that the import is not available until the very end. If the
+// import needs to be available immediate, use ImportNow.
// In normal natives, this imports from other normal natives.
// In experimental natives, this imports from other experimental natives and
// whitelisted exports from normal natives.
function Import(f) {
f.next = imports;
imports = f;
-};
+}
+
+// Import immediately from exports of previous scripts. We need this for
+// functions called during bootstrapping. Hooking up imports in PostNatives
+// would be too late.
+function ImportNow(f) {
+ f(exports_container);
+}
// In normal natives, import from experimental natives.
@@ -41,7 +57,7 @@ function Import(f) {
function ImportFromExperimental(f) {
f.next = imports_from_experimental;
imports_from_experimental = f;
-};
+}
function SetFunctionName(f, name, prefix) {
@@ -143,18 +159,24 @@ function SetUpLockedPrototype(
// -----------------------------------------------------------------------
// To be called by bootstrapper
-var experimental_exports = UNDEFINED;
-
function PostNatives(utils) {
%CheckIsBootstrapping();
- var container = {};
- for ( ; !IS_UNDEFINED(exports); exports = exports.next) exports(container);
- for ( ; !IS_UNDEFINED(imports); imports = imports.next) imports(container);
+ for ( ; !IS_UNDEFINED(imports); imports = imports.next) {
+ imports(exports_container);
+ }
- // Whitelist of exports from normal natives to experimental natives.
- var expose_to_experimental = [
+ var runtime_container = {};
+ for ( ; !IS_UNDEFINED(exports_to_runtime);
+ exports_to_runtime = exports_to_runtime.next) {
+ exports_to_runtime(runtime_container);
+ }
+ %ImportToRuntime(runtime_container);
+
+ // Whitelist of exports from normal natives to experimental natives and debug.
+ var expose_list = [
"ArrayToString",
+ "FunctionSourceString",
"GetIterator",
"GetMethod",
"InnerArrayEvery",
@@ -177,56 +199,83 @@ function PostNatives(utils) {
"ObjectDefineProperty",
"OwnPropertyKeys",
"ToNameArray",
+ "ToBoolean",
+ "ToNumber",
+ "ToString",
];
- experimental_exports = {};
+
+ var filtered_exports = {};
%OptimizeObjectForAddingMultipleProperties(
- experimental_exports, expose_to_experimental.length);
- for (var key of expose_to_experimental) {
- experimental_exports[key] = container[key];
+ filtered_exports, expose_list.length);
+ for (var key of expose_list) {
+ filtered_exports[key] = exports_container[key];
}
- %ToFastProperties(experimental_exports);
- container = UNDEFINED;
+ %ToFastProperties(filtered_exports);
+ exports_container = filtered_exports;
utils.PostNatives = UNDEFINED;
utils.ImportFromExperimental = UNDEFINED;
-};
+}
function PostExperimentals(utils) {
%CheckIsBootstrapping();
- for ( ; !IS_UNDEFINED(exports); exports = exports.next) {
- exports(experimental_exports);
- }
for ( ; !IS_UNDEFINED(imports); imports = imports.next) {
- imports(experimental_exports);
+ imports(exports_container);
}
for ( ; !IS_UNDEFINED(imports_from_experimental);
imports_from_experimental = imports_from_experimental.next) {
- imports_from_experimental(experimental_exports);
+ imports_from_experimental(exports_container);
+ }
+ var runtime_container = {};
+ for ( ; !IS_UNDEFINED(exports_to_runtime);
+ exports_to_runtime = exports_to_runtime.next) {
+ exports_to_runtime(runtime_container);
+ }
+ %ImportExperimentalToRuntime(runtime_container);
+
+ exports_container = UNDEFINED;
+
+ utils.PostExperimentals = UNDEFINED;
+ utils.PostDebug = UNDEFINED;
+ utils.Import = UNDEFINED;
+ utils.Export = UNDEFINED;
+}
+
+
+function PostDebug(utils) {
+ for ( ; !IS_UNDEFINED(imports); imports = imports.next) {
+ imports(exports_container);
}
- experimental_exports = UNDEFINED;
+ exports_container = UNDEFINED;
+ utils.PostDebug = UNDEFINED;
utils.PostExperimentals = UNDEFINED;
utils.Import = UNDEFINED;
utils.Export = UNDEFINED;
-};
+}
// -----------------------------------------------------------------------
-InstallFunctions(utils, NONE, [
- "Import", Import,
- "Export", Export,
- "ImportFromExperimental", ImportFromExperimental,
- "SetFunctionName", SetFunctionName,
- "InstallConstants", InstallConstants,
- "InstallFunctions", InstallFunctions,
- "InstallGetter", InstallGetter,
- "InstallGetterSetter", InstallGetterSetter,
- "SetUpLockedPrototype", SetUpLockedPrototype,
- "PostNatives", PostNatives,
- "PostExperimentals", PostExperimentals,
-]);
+%OptimizeObjectForAddingMultipleProperties(utils, 14);
+
+utils.Import = Import;
+utils.ImportNow = ImportNow;
+utils.Export = Export;
+utils.ExportToRuntime = ExportToRuntime;
+utils.ImportFromExperimental = ImportFromExperimental;
+utils.SetFunctionName = SetFunctionName;
+utils.InstallConstants = InstallConstants;
+utils.InstallFunctions = InstallFunctions;
+utils.InstallGetter = InstallGetter;
+utils.InstallGetterSetter = InstallGetterSetter;
+utils.SetUpLockedPrototype = SetUpLockedPrototype;
+utils.PostNatives = PostNatives;
+utils.PostExperimentals = PostExperimentals;
+utils.PostDebug = PostDebug;
+
+%ToFastProperties(utils);
})
diff --git a/deps/v8/src/promise.js b/deps/v8/src/promise.js
index 0fd4b89c51..0233dbebe4 100644
--- a/deps/v8/src/promise.js
+++ b/deps/v8/src/promise.js
@@ -2,12 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $promiseCreate;
-var $promiseResolve;
-var $promiseReject;
-var $promiseChain;
-var $promiseCatch;
-var $promiseThen;
var $promiseHasUserDefinedRejectHandler;
var $promiseStatus;
var $promiseValue;
@@ -386,14 +380,19 @@ utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
"catch", PromiseCatch
]);
-$promiseCreate = PromiseCreate;
-$promiseResolve = PromiseResolve;
-$promiseReject = PromiseReject;
-$promiseChain = PromiseChain;
-$promiseCatch = PromiseCatch;
-$promiseThen = PromiseThen;
-$promiseHasUserDefinedRejectHandler = PromiseHasUserDefinedRejectHandler;
$promiseStatus = promiseStatus;
$promiseValue = promiseValue;
+utils.ExportToRuntime(function(to) {
+ to.promiseStatus = promiseStatus;
+ to.promiseValue = promiseValue;
+ to.PromiseCreate = PromiseCreate;
+ to.PromiseResolve = PromiseResolve;
+ to.PromiseReject = PromiseReject;
+ to.PromiseChain = PromiseChain;
+ to.PromiseCatch = PromiseCatch;
+ to.PromiseThen = PromiseThen;
+ to.PromiseHasUserDefinedRejectHandler = PromiseHasUserDefinedRejectHandler;
+});
+
})
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 791eb524c7..33d3b8d7ef 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -53,7 +53,7 @@ enum PropertyLocation { kField = 0, kDescriptor = 1 };
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.
-// A copy of this is in mirror-debugger.js.
+// A copy of this is in debug/mirrors.js.
enum PropertyType {
DATA = (kField << 1) | kData,
DATA_CONSTANT = (kDescriptor << 1) | kData,
@@ -320,6 +320,8 @@ class PropertyDetails BASE_EMBEDDED {
class KindField : public BitField<PropertyKind, 0, 1> {};
class LocationField : public BitField<PropertyLocation, 1, 1> {};
class AttributesField : public BitField<PropertyAttributes, 2, 3> {};
+ static const int kAttributesReadOnlyMask =
+ (READ_ONLY << AttributesField::kShift);
// Bit fields for normalized objects.
class PropertyCellTypeField : public BitField<PropertyCellType, 5, 2> {};
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 5341f27037..09ec5f207f 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -9,7 +9,6 @@
#include "src/factory.h"
#include "src/field-index.h"
-#include "src/field-index-inl.h"
#include "src/isolate.h"
#include "src/types.h"
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js
index 782035b6f0..88b6a273ba 100644
--- a/deps/v8/src/proxy.js
+++ b/deps/v8/src/proxy.js
@@ -2,11 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $proxyDerivedGetTrap;
-var $proxyDerivedHasTrap;
-var $proxyDerivedSetTrap;
-var $proxyEnumerate;
-
(function(global, utils) {
"use strict";
@@ -199,15 +194,17 @@ utils.InstallFunctions(Proxy, DONT_ENUM, [
// -------------------------------------------------------------------
// Exports
-$proxyDerivedGetTrap = DerivedGetTrap;
-$proxyDerivedHasTrap = DerivedHasTrap;
-$proxyDerivedSetTrap = DerivedSetTrap;
-$proxyEnumerate = ProxyEnumerate;
-
utils.Export(function(to) {
to.ProxyDelegateCallAndConstruct = DelegateCallAndConstruct;
to.ProxyDerivedHasOwnTrap = DerivedHasOwnTrap;
to.ProxyDerivedKeysTrap = DerivedKeysTrap;
});
+utils.ExportToRuntime(function(to) {
+ to.ProxyDerivedGetTrap = DerivedGetTrap;
+ to.ProxyDerivedHasTrap = DerivedHasTrap;
+ to.ProxyDerivedSetTrap = DerivedSetTrap;
+ to.ProxyEnumerate = ProxyEnumerate;
+});
+
})
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index bf75ca1b01..e717b26158 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -16,6 +16,10 @@ var harmony_unicode_regexps = false;
var GlobalRegExp = global.RegExp;
var InternalPackedArray = utils.InternalPackedArray;
+utils.Import(function(from) {
+ ToNumber = from.ToNumber;
+});
+
// -------------------------------------------------------------------
// Property of the builtins object for recording the result of the last
diff --git a/deps/v8/src/regexp/OWNERS b/deps/v8/src/regexp/OWNERS
new file mode 100644
index 0000000000..d9d588df6c
--- /dev/null
+++ b/deps/v8/src/regexp/OWNERS
@@ -0,0 +1,6 @@
+set noparent
+
+jochen@chromium.org
+marja@chromium.org
+ulan@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/src/regexp/arm/OWNERS b/deps/v8/src/regexp/arm/OWNERS
new file mode 100644
index 0000000000..906a5ce641
--- /dev/null
+++ b/deps/v8/src/regexp/arm/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 9f4b4af42d..d502060440 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -2,20 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM
+#include "src/regexp/arm/regexp-macro-assembler-arm.h"
+
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
-#include "src/arm/regexp-macro-assembler-arm.h"
-
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 078d0dfa62..123a95711e 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
+#ifndef V8_REGEXP_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
+#define V8_REGEXP_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#include "src/arm/assembler-arm.h"
-#include "src/arm/assembler-arm-inl.h"
#include "src/macro-assembler.h"
+#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
@@ -216,4 +216,4 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
}} // namespace v8::internal
-#endif // V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
+#endif // V8_REGEXP_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
diff --git a/deps/v8/src/regexp/arm64/OWNERS b/deps/v8/src/regexp/arm64/OWNERS
new file mode 100644
index 0000000000..906a5ce641
--- /dev/null
+++ b/deps/v8/src/regexp/arm64/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 801cc1359b..ed24cf3401 100644
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -2,20 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
+#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
+
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
-#include "src/arm64/regexp-macro-assembler-arm64.h"
-
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index ae4393f7ac..a48291a421 100644
--- a/deps/v8/src/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
-#define V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
-
-#include "src/macro-assembler.h"
+#ifndef V8_REGEXP_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
+#define V8_REGEXP_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
#include "src/arm64/assembler-arm64.h"
-#include "src/arm64/assembler-arm64-inl.h"
+#include "src/macro-assembler.h"
+#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
@@ -292,4 +291,4 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
}} // namespace v8::internal
-#endif // V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
+#endif // V8_REGEXP_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/bytecodes-irregexp.h b/deps/v8/src/regexp/bytecodes-irregexp.h
index 04b9740acb..27691422f3 100644
--- a/deps/v8/src/bytecodes-irregexp.h
+++ b/deps/v8/src/regexp/bytecodes-irregexp.h
@@ -3,8 +3,8 @@
// found in the LICENSE file.
-#ifndef V8_BYTECODES_IRREGEXP_H_
-#define V8_BYTECODES_IRREGEXP_H_
+#ifndef V8_REGEXP_BYTECODES_IRREGEXP_H_
+#define V8_REGEXP_BYTECODES_IRREGEXP_H_
namespace v8 {
namespace internal {
@@ -78,4 +78,4 @@ BYTECODE_ITERATOR(DECLARE_BYTECODE_LENGTH)
#undef DECLARE_BYTECODE_LENGTH
} }
-#endif // V8_BYTECODES_IRREGEXP_H_
+#endif // V8_REGEXP_BYTECODES_IRREGEXP_H_
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 36be56e46b..3ba5db14f2 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -2,19 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
+#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
+
#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
-#include "src/ia32/regexp-macro-assembler-ia32.h"
-
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index 2927a26077..8ec0a9e543 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
-#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
+#ifndef V8_REGEXP_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
+#define V8_REGEXP_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
#include "src/ia32/assembler-ia32.h"
-#include "src/ia32/assembler-ia32-inl.h"
#include "src/macro-assembler.h"
+#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
@@ -198,4 +198,4 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
}} // namespace v8::internal
-#endif // V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
+#endif // V8_REGEXP_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/regexp/interpreter-irregexp.cc
index 97c9ba0551..afc31a3d57 100644
--- a/deps/v8/src/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/interpreter-irregexp.cc
@@ -4,14 +4,12 @@
// A simple interpreter for the Irregexp byte code.
-
-#include "src/v8.h"
+#include "src/regexp/interpreter-irregexp.h"
#include "src/ast.h"
-#include "src/bytecodes-irregexp.h"
-#include "src/interpreter-irregexp.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/bytecodes-irregexp.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/unicode.h"
#include "src/utils.h"
diff --git a/deps/v8/src/interpreter-irregexp.h b/deps/v8/src/regexp/interpreter-irregexp.h
index 4953a601e4..d97d3b0f17 100644
--- a/deps/v8/src/interpreter-irregexp.h
+++ b/deps/v8/src/regexp/interpreter-irregexp.h
@@ -4,8 +4,10 @@
// A simple interpreter for the Irregexp byte code.
-#ifndef V8_INTERPRETER_IRREGEXP_H_
-#define V8_INTERPRETER_IRREGEXP_H_
+#ifndef V8_REGEXP_INTERPRETER_IRREGEXP_H_
+#define V8_REGEXP_INTERPRETER_IRREGEXP_H_
+
+#include "src/regexp/jsregexp.h"
namespace v8 {
namespace internal {
@@ -23,4 +25,4 @@ class IrregexpInterpreter {
} } // namespace v8::internal
-#endif // V8_INTERPRETER_IRREGEXP_H_
+#endif // V8_REGEXP_INTERPRETER_IRREGEXP_H_
diff --git a/deps/v8/src/jsregexp-inl.h b/deps/v8/src/regexp/jsregexp-inl.h
index 1ab70b8c4b..118f3dba9c 100644
--- a/deps/v8/src/jsregexp-inl.h
+++ b/deps/v8/src/regexp/jsregexp-inl.h
@@ -3,14 +3,14 @@
// found in the LICENSE file.
-#ifndef V8_JSREGEXP_INL_H_
-#define V8_JSREGEXP_INL_H_
+#ifndef V8_REGEXP_JSREGEXP_INL_H_
+#define V8_REGEXP_JSREGEXP_INL_H_
#include "src/allocation.h"
#include "src/handles.h"
#include "src/heap/heap.h"
-#include "src/jsregexp.h"
#include "src/objects.h"
+#include "src/regexp/jsregexp.h"
namespace v8 {
namespace internal {
@@ -80,4 +80,4 @@ int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
} } // namespace v8::internal
-#endif // V8_JSREGEXP_INL_H_
+#endif // V8_REGEXP_JSREGEXP_INL_H_
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index a02141d77a..aacaa1b9d3 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/regexp/jsregexp.h"
#include "src/ast.h"
#include "src/base/platform/platform.h"
@@ -10,43 +10,42 @@
#include "src/compiler.h"
#include "src/execution.h"
#include "src/factory.h"
-#include "src/jsregexp-inl.h"
-#include "src/jsregexp.h"
#include "src/messages.h"
#include "src/ostreams.h"
#include "src/parser.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-macro-assembler-irregexp.h"
-#include "src/regexp-macro-assembler-tracer.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/interpreter-irregexp.h"
+#include "src/regexp/jsregexp-inl.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-macro-assembler-irregexp.h"
+#include "src/regexp/regexp-macro-assembler-tracer.h"
+#include "src/regexp/regexp-stack.h"
#include "src/runtime/runtime.h"
+#include "src/splay-tree-inl.h"
#include "src/string-search.h"
#include "src/unicode-decoder.h"
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/regexp-macro-assembler-ia32.h" // NOLINT
+#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "src/x64/regexp-macro-assembler-x64.h" // NOLINT
+#include "src/regexp/x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/regexp-macro-assembler-arm64.h" // NOLINT
+#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
+#include "src/regexp/arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT
+#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
+#include "src/regexp/mips/regexp-macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/regexp-macro-assembler-mips64.h" // NOLINT
+#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
#elif V8_TARGET_ARCH_X87
-#include "src/x87/regexp-macro-assembler-x87.h" // NOLINT
+#include "src/regexp/x87/regexp-macro-assembler-x87.h"
#else
#error Unsupported target architecture.
#endif
#endif
-#include "src/interpreter-irregexp.h"
-
namespace v8 {
namespace internal {
@@ -596,7 +595,7 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
if (required_registers > Isolate::kJSRegexpStaticOffsetsVectorSize) {
output_registers = NewArray<int32_t>(required_registers);
}
- SmartArrayPointer<int32_t> auto_release(output_registers);
+ base::SmartArrayPointer<int32_t> auto_release(output_registers);
if (output_registers == NULL) {
output_registers = isolate->jsregexp_static_offsets_vector();
}
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index ff7759bfec..39e702149d 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_JSREGEXP_H_
-#define V8_JSREGEXP_H_
+#ifndef V8_REGEXP_JSREGEXP_H_
+#define V8_REGEXP_JSREGEXP_H_
#include "src/allocation.h"
#include "src/assembler.h"
@@ -1661,4 +1661,4 @@ class RegExpEngine: public AllStatic {
} } // namespace v8::internal
-#endif // V8_JSREGEXP_H_
+#endif // V8_REGEXP_JSREGEXP_H_
diff --git a/deps/v8/src/regexp/mips/OWNERS b/deps/v8/src/regexp/mips/OWNERS
new file mode 100644
index 0000000000..5508ba626f
--- /dev/null
+++ b/deps/v8/src/regexp/mips/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index f29802130d..77f09917c0 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -2,19 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS
+#include "src/regexp/mips/regexp-macro-assembler-mips.h"
+
#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
-#include "src/mips/regexp-macro-assembler-mips.h"
-
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index 65ee173b0f..36fd4b1564 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#ifndef V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#define V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
#include "src/macro-assembler.h"
-#include "src/mips/assembler-mips-inl.h"
#include "src/mips/assembler-mips.h"
-#include "src/mips/macro-assembler-mips.h"
+#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
@@ -224,4 +222,4 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
}} // namespace v8::internal
-#endif // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#endif // V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/regexp/mips64/OWNERS b/deps/v8/src/regexp/mips64/OWNERS
new file mode 100644
index 0000000000..5508ba626f
--- /dev/null
+++ b/deps/v8/src/regexp/mips64/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index ca62f5b508..838fc68a80 100644
--- a/deps/v8/src/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -2,19 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_MIPS64
+#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
+
#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
-#include "src/mips64/regexp-macro-assembler-mips64.h"
-
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index dd02611fbe..84c85affbe 100644
--- a/deps/v8/src/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#ifndef V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#define V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
#include "src/macro-assembler.h"
-#include "src/mips64/assembler-mips64-inl.h"
#include "src/mips64/assembler-mips64.h"
-#include "src/mips64/macro-assembler-mips64.h"
+#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
@@ -266,4 +264,4 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
}} // namespace v8::internal
-#endif // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#endif // V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/regexp/ppc/OWNERS b/deps/v8/src/regexp/ppc/OWNERS
new file mode 100644
index 0000000000..eb007cb908
--- /dev/null
+++ b/deps/v8/src/regexp/ppc/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 05e84e415f..e820aa9bbf 100644
--- a/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -2,21 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_PPC
+#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
+
#include "src/base/bits.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
-#include "src/ppc/regexp-macro-assembler-ppc.h"
-
namespace v8 {
namespace internal {
@@ -171,8 +169,7 @@ void RegExpMacroAssemblerPPC::Backtrack() {
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(r3);
__ add(r3, r3, code_pointer());
- __ mtctr(r3);
- __ bctr();
+ __ Jump(r3);
}
@@ -834,8 +831,8 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Restore registers r25..r31 and return (restoring lr to pc).
__ MultiPop(registers_to_retain);
__ pop(r0);
- __ mtctr(r0);
- __ bctr();
+ __ mtlr(r0);
+ __ blr();
// Backtrack code (branch target for conditional backtracks).
if (backtrack_label_.is_linked()) {
diff --git a/deps/v8/src/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 0dd292b317..2dd339eb8d 100644
--- a/deps/v8/src/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
-#define V8_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
+#ifndef V8_REGEXP_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
+#define V8_REGEXP_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
#include "src/macro-assembler.h"
#include "src/ppc/assembler-ppc.h"
-#include "src/ppc/assembler-ppc-inl.h"
+#include "src/ppc/frames-ppc.h"
+#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
@@ -210,4 +211,4 @@ const RegList kRegExpCalleeSaved =
}
} // namespace v8::internal
-#endif // V8_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
+#endif // V8_REGEXP_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp-inl.h b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
index 942cf57521..b86d28dfb9 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp-inl.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
@@ -2,16 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// A light-weight assembler for the Irregexp byte code.
-
-
-#include "src/v8.h"
+#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
+#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
#include "src/ast.h"
-#include "src/bytecodes-irregexp.h"
-
-#ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
-#define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
+#include "src/regexp/bytecodes-irregexp.h"
namespace v8 {
namespace internal {
@@ -63,4 +58,4 @@ void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
} } // namespace v8::internal
-#endif // V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
+#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
index 7bd3b56b0f..ca567c9bda 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/regexp/regexp-macro-assembler-irregexp.h"
#include "src/ast.h"
-#include "src/bytecodes-irregexp.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-macro-assembler-irregexp.h"
-#include "src/regexp-macro-assembler-irregexp-inl.h"
+#include "src/regexp/bytecodes-irregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-macro-assembler-irregexp-inl.h"
namespace v8 {
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
index 781defc297..556d78d23d 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
@@ -2,16 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
-#define V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
+#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
+#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
#ifdef V8_INTERPRETED_REGEXP
+// A light-weight assembler for the Irregexp byte code.
class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
public:
// Create an assembler. Instructions and relocation information are emitted
@@ -126,4 +127,4 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
} } // namespace v8::internal
-#endif // V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
+#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
index 19fae2f9ac..2abe55588e 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/regexp/regexp-macro-assembler-tracer.h"
#include "src/ast.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-macro-assembler-tracer.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
index 8b8d80a15a..d4092ceaad 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
@@ -2,8 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
-#define V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
+#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_TRACER_H_
+#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_TRACER_H_
+
+#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
@@ -81,4 +83,4 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
}} // namespace v8::internal
-#endif // V8_REGEXP_MACRO_ASSEMBLER_TRACER_H_
+#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_TRACER_H_
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 48cbbf3ed1..20105c0b30 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/assembler.h"
#include "src/ast.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/regexp-stack.h"
#include "src/simulator.h"
namespace v8 {
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index df244249b6..c3d94a6acf 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGEXP_MACRO_ASSEMBLER_H_
-#define V8_REGEXP_MACRO_ASSEMBLER_H_
+#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
+#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
#include "src/ast.h"
@@ -247,4 +247,4 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
} } // namespace v8::internal
-#endif // V8_REGEXP_MACRO_ASSEMBLER_H_
+#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
diff --git a/deps/v8/src/regexp-stack.cc b/deps/v8/src/regexp/regexp-stack.cc
index 0ef4942048..348e684151 100644
--- a/deps/v8/src/regexp-stack.cc
+++ b/deps/v8/src/regexp/regexp-stack.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/regexp/regexp-stack.h"
-#include "src/regexp-stack.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp-stack.h b/deps/v8/src/regexp/regexp-stack.h
index d18ce708d6..9a6394e198 100644
--- a/deps/v8/src/regexp-stack.h
+++ b/deps/v8/src/regexp/regexp-stack.h
@@ -2,8 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGEXP_STACK_H_
-#define V8_REGEXP_STACK_H_
+#ifndef V8_REGEXP_REGEXP_STACK_H_
+#define V8_REGEXP_REGEXP_STACK_H_
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -122,4 +126,4 @@ class RegExpStack {
}} // namespace v8::internal
-#endif // V8_REGEXP_STACK_H_
+#endif // V8_REGEXP_REGEXP_STACK_H_
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index f1eb072075..c0f5f2c323 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
+#include "src/regexp/x64/regexp-macro-assembler-x64.h"
+
#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
-#include "src/x64/regexp-macro-assembler-x64.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 70a6709b54..d690dc1974 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
-#define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
+#ifndef V8_REGEXP_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
+#define V8_REGEXP_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
#include "src/macro-assembler.h"
-#include "src/x64/assembler-x64-inl.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/x64/assembler-x64.h"
-#include "src/x64/macro-assembler-x64.h"
namespace v8 {
namespace internal {
@@ -279,4 +278,4 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
}} // namespace v8::internal
-#endif // V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
+#endif // V8_REGEXP_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
diff --git a/deps/v8/src/regexp/x87/OWNERS b/deps/v8/src/regexp/x87/OWNERS
new file mode 100644
index 0000000000..dd9998b261
--- /dev/null
+++ b/deps/v8/src/regexp/x87/OWNERS
@@ -0,0 +1 @@
+weiliang.lin@intel.com
diff --git a/deps/v8/src/x87/regexp-macro-assembler-x87.cc b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
index e043f6ea6e..c30994eae0 100644
--- a/deps/v8/src/x87/regexp-macro-assembler-x87.cc
+++ b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
+#include "src/regexp/x87/regexp-macro-assembler-x87.h"
+
#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
-#include "src/x87/regexp-macro-assembler-x87.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x87/regexp-macro-assembler-x87.h b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h
index f893262704..f636ca08ce 100644
--- a/deps/v8/src/x87/regexp-macro-assembler-x87.h
+++ b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
-#define V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
+#ifndef V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
+#define V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
#include "src/macro-assembler.h"
-#include "src/x87/assembler-x87-inl.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/x87/assembler-x87.h"
namespace v8 {
@@ -198,4 +198,4 @@ class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
}} // namespace v8::internal
-#endif // V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
+#endif // V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index c901653a2b..f7b0ce005e 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/rewriter.h"
#include "src/ast.h"
#include "src/parser.h"
-#include "src/rewriter.h"
#include "src/scopes.h"
namespace v8 {
@@ -211,7 +210,7 @@ EXPRESSION_NODE_LIST(DEF_VISIT)
// Assumes code has been parsed. Mutates the AST, so the AST should not
// continue to be used in the case of failure.
bool Rewriter::Rewrite(ParseInfo* info) {
- FunctionLiteral* function = info->function();
+ FunctionLiteral* function = info->literal();
DCHECK(function != NULL);
Scope* scope = function->scope();
DCHECK(scope != NULL);
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 351bca7b6a..4a45baf522 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -12,9 +12,9 @@
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/execution.h"
-#include "src/full-codegen.h"
+#include "src/frames-inl.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
-#include "src/heap/mark-compact.h"
#include "src/scopeinfo.h"
namespace v8 {
@@ -181,10 +181,12 @@ void RuntimeProfiler::OptimizeNow() {
// Attempt OSR if we are still running unoptimized code even though the
// the function has long been marked or even already been optimized.
int ticks = shared_code->profiler_ticks();
- int allowance = kOSRCodeSizeAllowanceBase +
- ticks * kOSRCodeSizeAllowancePerTick;
- if (shared_code->CodeSize() > allowance) {
- if (ticks < 255) shared_code->set_profiler_ticks(ticks + 1);
+ int64_t allowance =
+ kOSRCodeSizeAllowanceBase +
+ static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick;
+ if (shared_code->CodeSize() > allowance &&
+ ticks < Code::ProfilerTicksField::kMax) {
+ shared_code->set_profiler_ticks(ticks + 1);
} else {
AttemptOnStackReplacement(function);
}
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index fa71432883..e2e6d2ef35 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -9,64 +9,8 @@
// ALL CAPS. The compiled code passes the first argument in 'this'.
-/* -----------------------------------
- - - - C o m p a r i s o n - - -
- -----------------------------------
-*/
-
// The following declarations are shared with other native JS files.
// They are all declared at this one spot to avoid redeclaration errors.
-var EQUALS;
-var STRICT_EQUALS;
-var COMPARE;
-var COMPARE_STRONG;
-var ADD;
-var ADD_STRONG;
-var STRING_ADD_LEFT;
-var STRING_ADD_LEFT_STRONG;
-var STRING_ADD_RIGHT;
-var STRING_ADD_RIGHT_STRONG;
-var SUB;
-var SUB_STRONG;
-var MUL;
-var MUL_STRONG;
-var DIV;
-var DIV_STRONG;
-var MOD;
-var MOD_STRONG;
-var BIT_OR;
-var BIT_OR_STRONG;
-var BIT_AND;
-var BIT_AND_STRONG;
-var BIT_XOR;
-var BIT_XOR_STRONG;
-var SHL;
-var SHL_STRONG;
-var SAR;
-var SAR_STRONG;
-var SHR;
-var SHR_STRONG;
-var DELETE;
-var IN;
-var INSTANCE_OF;
-var CALL_NON_FUNCTION;
-var CALL_NON_FUNCTION_AS_CONSTRUCTOR;
-var CALL_FUNCTION_PROXY;
-var CALL_FUNCTION_PROXY_AS_CONSTRUCTOR;
-var CONCAT_ITERABLE_TO_ARRAY;
-var APPLY_PREPARE;
-var REFLECT_APPLY_PREPARE;
-var REFLECT_CONSTRUCT_PREPARE;
-var STACK_OVERFLOW;
-var TO_OBJECT;
-var TO_NUMBER;
-var TO_STRING;
-var TO_NAME;
-
-var StringLengthTFStub;
-var StringAddTFStub;
-var MathFloorStub;
-
var $defaultNumber;
var $defaultString;
var $NaN;
@@ -74,17 +18,13 @@ var $nonNumberToNumber;
var $nonStringToString;
var $sameValue;
var $sameValueZero;
-var $toBoolean;
-var $toInt32;
var $toInteger;
var $toLength;
var $toName;
var $toNumber;
-var $toObject;
var $toPositiveInteger;
var $toPrimitive;
var $toString;
-var $toUint32;
(function(global, utils) {
@@ -97,8 +37,13 @@ var GlobalNumber = global.Number;
// ----------------------------------------------------------------------------
+/* -----------------------------------
+- - - C o m p a r i s o n - - -
+-----------------------------------
+*/
+
// ECMA-262 Section 11.9.3.
-EQUALS = function EQUALS(y) {
+function EQUALS(y) {
if (IS_STRING(this) && IS_STRING(y)) return %StringEquals(this, y);
var x = this;
@@ -107,8 +52,8 @@ EQUALS = function EQUALS(y) {
while (true) {
if (IS_NUMBER(y)) return %NumberEquals(x, y);
if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
- if (IS_SYMBOL(y)) return 1; // not equal
if (!IS_SPEC_OBJECT(y)) {
+ if (IS_SYMBOL(y) || IS_SIMD_VALUE(y)) return 1; // not equal
// String or boolean.
return %NumberEquals(x, %$toNumber(y));
}
@@ -117,10 +62,10 @@ EQUALS = function EQUALS(y) {
} else if (IS_STRING(x)) {
while (true) {
if (IS_STRING(y)) return %StringEquals(x, y);
- if (IS_SYMBOL(y)) return 1; // not equal
if (IS_NUMBER(y)) return %NumberEquals(%$toNumber(x), y);
if (IS_BOOLEAN(y)) return %NumberEquals(%$toNumber(x), %$toNumber(y));
if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ if (IS_SYMBOL(y) || IS_SIMD_VALUE(y)) return 1; // not equal
y = %$toPrimitive(y, NO_HINT);
}
} else if (IS_SYMBOL(x)) {
@@ -131,47 +76,33 @@ EQUALS = function EQUALS(y) {
if (IS_NULL_OR_UNDEFINED(y)) return 1;
if (IS_NUMBER(y)) return %NumberEquals(%$toNumber(x), y);
if (IS_STRING(y)) return %NumberEquals(%$toNumber(x), %$toNumber(y));
- if (IS_SYMBOL(y)) return 1; // not equal
+ if (IS_SYMBOL(y) || IS_SIMD_VALUE(y)) return 1; // not equal
// y is object.
x = %$toNumber(x);
y = %$toPrimitive(y, NO_HINT);
} else if (IS_NULL_OR_UNDEFINED(x)) {
return IS_NULL_OR_UNDEFINED(y) ? 0 : 1;
+ } else if (IS_SIMD_VALUE(x)) {
+ if (!IS_SIMD_VALUE(y)) return 1; // not equal
+ return %SimdEquals(x, y);
} else {
// x is an object.
- if (IS_SPEC_OBJECT(y)) {
- return %_ObjectEquals(x, y) ? 0 : 1;
- }
+ if (IS_SPEC_OBJECT(y)) return %_ObjectEquals(x, y) ? 0 : 1;
if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
- if (IS_SYMBOL(y)) return 1; // not equal
- if (IS_BOOLEAN(y)) y = %$toNumber(y);
+ if (IS_BOOLEAN(y)) {
+ y = %$toNumber(y);
+ } else if (IS_SYMBOL(y) || IS_SIMD_VALUE(y)) {
+ return 1; // not equal
+ }
x = %$toPrimitive(x, NO_HINT);
}
}
}
-// ECMA-262, section 11.9.4, page 56.
-STRICT_EQUALS = function STRICT_EQUALS(x) {
- if (IS_STRING(this)) {
- if (!IS_STRING(x)) return 1; // not equal
- return %StringEquals(this, x);
- }
-
- if (IS_NUMBER(this)) {
- if (!IS_NUMBER(x)) return 1; // not equal
- return %NumberEquals(this, x);
- }
-
- // If anything else gets here, we just do simple identity check.
- // Objects (including functions), null, undefined and booleans were
- // checked in the CompareStub, so there should be nothing left.
- return %_ObjectEquals(this, x) ? 0 : 1;
-}
-
// ECMA-262, section 11.8.5, page 53. The 'ncr' parameter is used as
// the result when either (or both) the operands are NaN.
-COMPARE = function COMPARE(x, ncr) {
+function COMPARE(x, ncr) {
var left;
var right;
// Fast cases for string, numbers and undefined compares.
@@ -207,7 +138,7 @@ COMPARE = function COMPARE(x, ncr) {
}
// Strong mode COMPARE throws if an implicit conversion would be performed
-COMPARE_STRONG = function COMPARE_STRONG(x, ncr) {
+function COMPARE_STRONG(x, ncr) {
if (IS_STRING(this) && IS_STRING(x)) return %_StringCompare(this, x);
if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberCompare(this, x, ncr);
@@ -222,7 +153,7 @@ COMPARE_STRONG = function COMPARE_STRONG(x, ncr) {
*/
// ECMA-262, section 11.6.1, page 50.
-ADD = function ADD(x) {
+function ADD(x) {
// Fast case: Check for number operands and do the addition.
if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberAdd(this, x);
if (IS_STRING(this) && IS_STRING(x)) return %_StringAdd(this, x);
@@ -242,7 +173,7 @@ ADD = function ADD(x) {
// Strong mode ADD throws if an implicit conversion would be performed
-ADD_STRONG = function ADD_STRONG(x) {
+function ADD_STRONG(x) {
if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberAdd(this, x);
if (IS_STRING(this) && IS_STRING(x)) return %_StringAdd(this, x);
@@ -251,7 +182,7 @@ ADD_STRONG = function ADD_STRONG(x) {
// Left operand (this) is already a string.
-STRING_ADD_LEFT = function STRING_ADD_LEFT(y) {
+function STRING_ADD_LEFT(y) {
if (!IS_STRING(y)) {
if (IS_STRING_WRAPPER(y) && %_IsStringWrapperSafeForDefaultValueOf(y)) {
y = %_ValueOf(y);
@@ -265,17 +196,8 @@ STRING_ADD_LEFT = function STRING_ADD_LEFT(y) {
}
-// Left operand (this) is already a string.
-STRING_ADD_LEFT_STRONG = function STRING_ADD_LEFT_STRONG(y) {
- if (IS_STRING(y)) {
- return %_StringAdd(this, y);
- }
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
// Right operand (y) is already a string.
-STRING_ADD_RIGHT = function STRING_ADD_RIGHT(y) {
+function STRING_ADD_RIGHT(y) {
var x = this;
if (!IS_STRING(x)) {
if (IS_STRING_WRAPPER(x) && %_IsStringWrapperSafeForDefaultValueOf(x)) {
@@ -290,17 +212,8 @@ STRING_ADD_RIGHT = function STRING_ADD_RIGHT(y) {
}
-// Right operand (y) is already a string.
-STRING_ADD_RIGHT_STRONG = function STRING_ADD_RIGHT_STRONG(y) {
- if (IS_STRING(this)) {
- return %_StringAdd(this, y);
- }
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
// ECMA-262, section 11.6.2, page 50.
-SUB = function SUB(y) {
+function SUB(y) {
var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
return %NumberSub(x, y);
@@ -308,7 +221,7 @@ SUB = function SUB(y) {
// Strong mode SUB throws if an implicit conversion would be performed
-SUB_STRONG = function SUB_STRONG(y) {
+function SUB_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberSub(this, y);
}
@@ -317,7 +230,7 @@ SUB_STRONG = function SUB_STRONG(y) {
// ECMA-262, section 11.5.1, page 48.
-MUL = function MUL(y) {
+function MUL(y) {
var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
return %NumberMul(x, y);
@@ -325,7 +238,7 @@ MUL = function MUL(y) {
// Strong mode MUL throws if an implicit conversion would be performed
-MUL_STRONG = function MUL_STRONG(y) {
+function MUL_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberMul(this, y);
}
@@ -334,7 +247,7 @@ MUL_STRONG = function MUL_STRONG(y) {
// ECMA-262, section 11.5.2, page 49.
-DIV = function DIV(y) {
+function DIV(y) {
var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
return %NumberDiv(x, y);
@@ -342,7 +255,7 @@ DIV = function DIV(y) {
// Strong mode DIV throws if an implicit conversion would be performed
-DIV_STRONG = function DIV_STRONG(y) {
+function DIV_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberDiv(this, y);
}
@@ -351,7 +264,7 @@ DIV_STRONG = function DIV_STRONG(y) {
// ECMA-262, section 11.5.3, page 49.
-MOD = function MOD(y) {
+function MOD(y) {
var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
return %NumberMod(x, y);
@@ -359,7 +272,7 @@ MOD = function MOD(y) {
// Strong mode MOD throws if an implicit conversion would be performed
-MOD_STRONG = function MOD_STRONG(y) {
+function MOD_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberMod(this, y);
}
@@ -373,7 +286,7 @@ MOD_STRONG = function MOD_STRONG(y) {
*/
// ECMA-262, section 11.10, page 57.
-BIT_OR = function BIT_OR(y) {
+function BIT_OR(y) {
var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
return %NumberOr(x, y);
@@ -381,7 +294,7 @@ BIT_OR = function BIT_OR(y) {
// Strong mode BIT_OR throws if an implicit conversion would be performed
-BIT_OR_STRONG = function BIT_OR_STRONG(y) {
+function BIT_OR_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberOr(this, y);
}
@@ -390,7 +303,7 @@ BIT_OR_STRONG = function BIT_OR_STRONG(y) {
// ECMA-262, section 11.10, page 57.
-BIT_AND = function BIT_AND(y) {
+function BIT_AND(y) {
var x;
if (IS_NUMBER(this)) {
x = this;
@@ -412,7 +325,7 @@ BIT_AND = function BIT_AND(y) {
// Strong mode BIT_AND throws if an implicit conversion would be performed
-BIT_AND_STRONG = function BIT_AND_STRONG(y) {
+function BIT_AND_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberAnd(this, y);
}
@@ -421,7 +334,7 @@ BIT_AND_STRONG = function BIT_AND_STRONG(y) {
// ECMA-262, section 11.10, page 57.
-BIT_XOR = function BIT_XOR(y) {
+function BIT_XOR(y) {
var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
return %NumberXor(x, y);
@@ -429,7 +342,7 @@ BIT_XOR = function BIT_XOR(y) {
// Strong mode BIT_XOR throws if an implicit conversion would be performed
-BIT_XOR_STRONG = function BIT_XOR_STRONG(y) {
+function BIT_XOR_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberXor(this, y);
}
@@ -438,7 +351,7 @@ BIT_XOR_STRONG = function BIT_XOR_STRONG(y) {
// ECMA-262, section 11.7.1, page 51.
-SHL = function SHL(y) {
+function SHL(y) {
var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
return %NumberShl(x, y);
@@ -446,7 +359,7 @@ SHL = function SHL(y) {
// Strong mode SHL throws if an implicit conversion would be performed
-SHL_STRONG = function SHL_STRONG(y) {
+function SHL_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberShl(this, y);
}
@@ -455,7 +368,7 @@ SHL_STRONG = function SHL_STRONG(y) {
// ECMA-262, section 11.7.2, page 51.
-SAR = function SAR(y) {
+function SAR(y) {
var x;
if (IS_NUMBER(this)) {
x = this;
@@ -477,7 +390,7 @@ SAR = function SAR(y) {
// Strong mode SAR throws if an implicit conversion would be performed
-SAR_STRONG = function SAR_STRONG(y) {
+function SAR_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberSar(this, y);
}
@@ -486,7 +399,7 @@ SAR_STRONG = function SAR_STRONG(y) {
// ECMA-262, section 11.7.3, page 52.
-SHR = function SHR(y) {
+function SHR(y) {
var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
return %NumberShr(x, y);
@@ -494,7 +407,7 @@ SHR = function SHR(y) {
// Strong mode SHR throws if an implicit conversion would be performed
-SHR_STRONG = function SHR_STRONG(y) {
+function SHR_STRONG(y) {
if (IS_NUMBER(this) && IS_NUMBER(y)) {
return %NumberShr(this, y);
}
@@ -507,14 +420,8 @@ SHR_STRONG = function SHR_STRONG(y) {
-----------------------------
*/
-// ECMA-262, section 11.4.1, page 46.
-DELETE = function DELETE(key, language_mode) {
- return %DeleteProperty(%$toObject(this), key, language_mode);
-}
-
-
// ECMA-262, section 11.8.7, page 54.
-IN = function IN(x) {
+function IN(x) {
if (!IS_SPEC_OBJECT(x)) {
throw %MakeTypeError(kInvalidInOperatorUse, this, x);
}
@@ -532,7 +439,7 @@ IN = function IN(x) {
// efficient, the return value should be zero if the 'this' is an
// instance of F, and non-zero if not. This makes it possible to avoid
// an expensive ToBoolean conversion in the generated code.
-INSTANCE_OF = function INSTANCE_OF(F) {
+function INSTANCE_OF(F) {
var V = this;
if (!IS_SPEC_FUNCTION(F)) {
throw %MakeTypeError(kInstanceofFunctionExpected, F);
@@ -560,7 +467,7 @@ INSTANCE_OF = function INSTANCE_OF(F) {
}
-CALL_NON_FUNCTION = function CALL_NON_FUNCTION() {
+function CALL_NON_FUNCTION() {
var delegate = %GetFunctionDelegate(this);
if (!IS_FUNCTION(delegate)) {
var callsite = %RenderCallSite();
@@ -571,7 +478,7 @@ CALL_NON_FUNCTION = function CALL_NON_FUNCTION() {
}
-CALL_NON_FUNCTION_AS_CONSTRUCTOR = function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
+function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
var delegate = %GetConstructorDelegate(this);
if (!IS_FUNCTION(delegate)) {
var callsite = %RenderCallSite();
@@ -582,7 +489,7 @@ CALL_NON_FUNCTION_AS_CONSTRUCTOR = function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
}
-CALL_FUNCTION_PROXY = function CALL_FUNCTION_PROXY() {
+function CALL_FUNCTION_PROXY() {
var arity = %_ArgumentsLength() - 1;
var proxy = %_Arguments(arity); // The proxy comes in as an additional arg.
var trap = %GetCallTrap(proxy);
@@ -590,15 +497,14 @@ CALL_FUNCTION_PROXY = function CALL_FUNCTION_PROXY() {
}
-CALL_FUNCTION_PROXY_AS_CONSTRUCTOR =
- function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR () {
+function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR () {
var proxy = this;
var trap = %GetConstructTrap(proxy);
return %Apply(trap, this, arguments, 0, %_ArgumentsLength());
}
-APPLY_PREPARE = function APPLY_PREPARE(args) {
+function APPLY_PREPARE(args) {
var length;
// First check whether length is a positive Smi and args is an
// array. This is the fast case. If this fails, we do the slow case
@@ -611,7 +517,7 @@ APPLY_PREPARE = function APPLY_PREPARE(args) {
}
}
- length = (args == null) ? 0 : %$toUint32(args.length);
+ length = (args == null) ? 0 : TO_UINT32(args.length);
// We can handle any number of apply arguments if the stack is
// big enough, but sanity check the value to avoid overflow when
@@ -633,7 +539,7 @@ APPLY_PREPARE = function APPLY_PREPARE(args) {
}
-REFLECT_APPLY_PREPARE = function REFLECT_APPLY_PREPARE(args) {
+function REFLECT_APPLY_PREPARE(args) {
var length;
// First check whether length is a positive Smi and args is an
// array. This is the fast case. If this fails, we do the slow case
@@ -667,7 +573,7 @@ REFLECT_APPLY_PREPARE = function REFLECT_APPLY_PREPARE(args) {
}
-REFLECT_CONSTRUCT_PREPARE = function REFLECT_CONSTRUCT_PREPARE(
+function REFLECT_CONSTRUCT_PREPARE(
args, newTarget) {
var length;
var ctorOk = IS_SPEC_FUNCTION(this) && %IsConstructor(this);
@@ -717,86 +623,34 @@ REFLECT_CONSTRUCT_PREPARE = function REFLECT_CONSTRUCT_PREPARE(
}
-CONCAT_ITERABLE_TO_ARRAY = function CONCAT_ITERABLE_TO_ARRAY(iterable) {
+function CONCAT_ITERABLE_TO_ARRAY(iterable) {
return %$concatIterableToArray(this, iterable);
};
-STACK_OVERFLOW = function STACK_OVERFLOW(length) {
+function STACK_OVERFLOW(length) {
throw %MakeRangeError(kStackOverflow);
}
-// Convert the receiver to an object - forward to ToObject.
-TO_OBJECT = function TO_OBJECT() {
- return %$toObject(this);
-}
-
-
// Convert the receiver to a number - forward to ToNumber.
-TO_NUMBER = function TO_NUMBER() {
+function TO_NUMBER() {
return %$toNumber(this);
}
// Convert the receiver to a string - forward to ToString.
-TO_STRING = function TO_STRING() {
+function TO_STRING() {
return %$toString(this);
}
// Convert the receiver to a string or symbol - forward to ToName.
-TO_NAME = function TO_NAME() {
+function TO_NAME() {
return %$toName(this);
}
-/* -----------------------------------------------
- - - - J a v a S c r i p t S t u b s - - -
- -----------------------------------------------
-*/
-
-StringLengthTFStub = function StringLengthTFStub(call_conv, minor_key) {
- var stub = function(receiver, name, i, v) {
- // i and v are dummy parameters mandated by the InterfaceDescriptor,
- // (LoadWithVectorDescriptor).
- return %_StringGetLength(%_JSValueGetValue(receiver));
- }
- return stub;
-}
-
-StringAddTFStub = function StringAddTFStub(call_conv, minor_key) {
- var stub = function(left, right) {
- return %StringAdd(left, right);
- }
- return stub;
-}
-
-MathFloorStub = function MathFloorStub(call_conv, minor_key) {
- var stub = function(f, i, v) {
- // |f| is calling function's JSFunction
- // |i| is TypeFeedbackVector slot # of callee's CallIC for Math.floor call
- // |v| is the value to floor
- var r = %_MathFloor(+v);
- if (%_IsMinusZero(r)) {
- // Collect type feedback when the result of the floor is -0. This is
- // accomplished by storing a sentinel in the second, "extra"
- // TypeFeedbackVector slot corresponding to the Math.floor CallIC call in
- // the caller's TypeVector.
- %_FixedArraySet(%_GetTypeFeedbackVector(f), ((i|0)+1)|0, 1);
- return -0;
- }
- // Return integers in smi range as smis.
- var trunc = r|0;
- if (trunc === r) {
- return trunc;
- }
- return r;
- }
- return stub;
-}
-
-
/* -------------------------------------
- - - C o n v e r s i o n s - - -
-------------------------------------
@@ -805,11 +659,7 @@ MathFloorStub = function MathFloorStub(call_conv, minor_key) {
// ECMA-262, section 9.1, page 30. Use null/undefined for no hint,
// (1) for number hint, and (2) for string hint.
function ToPrimitive(x, hint) {
- // Fast case check.
- if (IS_STRING(x)) return x;
- // Normal behavior.
if (!IS_SPEC_OBJECT(x)) return x;
- if (IS_SYMBOL_WRAPPER(x)) throw MakeTypeError(kSymbolToPrimitive);
if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
return (hint == NUMBER_HINT) ? DefaultNumber(x) : DefaultString(x);
}
@@ -834,7 +684,7 @@ function ToNumber(x) {
}
if (IS_BOOLEAN(x)) return x ? 1 : 0;
if (IS_UNDEFINED(x)) return NAN;
- if (IS_SYMBOL(x)) throw MakeTypeError(kSymbolToNumber);
+ // Types that can't be converted to number are caught in DefaultNumber.
return (IS_NULL(x)) ? 0 : ToNumber(DefaultNumber(x));
}
@@ -845,7 +695,7 @@ function NonNumberToNumber(x) {
}
if (IS_BOOLEAN(x)) return x ? 1 : 0;
if (IS_UNDEFINED(x)) return NAN;
- if (IS_SYMBOL(x)) throw MakeTypeError(kSymbolToNumber);
+ // Types that can't be converted to number are caught in DefaultNumber.
return (IS_NULL(x)) ? 0 : ToNumber(DefaultNumber(x));
}
@@ -856,7 +706,7 @@ function ToString(x) {
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
if (IS_UNDEFINED(x)) return 'undefined';
- if (IS_SYMBOL(x)) throw MakeTypeError(kSymbolToString);
+ // Types that can't be converted to string are caught in DefaultString.
return (IS_NULL(x)) ? 'null' : ToString(DefaultString(x));
}
@@ -864,7 +714,7 @@ function NonStringToString(x) {
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
if (IS_UNDEFINED(x)) return 'undefined';
- if (IS_SYMBOL(x)) throw MakeTypeError(kSymbolToString);
+ // Types that can't be converted to string are caught in DefaultString.
return (IS_NULL(x)) ? 'null' : ToString(DefaultString(x));
}
@@ -875,19 +725,6 @@ function ToName(x) {
}
-// ECMA-262, section 9.9, page 36.
-function ToObject(x) {
- if (IS_STRING(x)) return new GlobalString(x);
- if (IS_NUMBER(x)) return new GlobalNumber(x);
- if (IS_BOOLEAN(x)) return new GlobalBoolean(x);
- if (IS_SYMBOL(x)) return %NewSymbolWrapper(x);
- if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
- throw MakeTypeError(kUndefinedOrNullToObject);
- }
- return x;
-}
-
-
// ECMA-262, section 9.4, page 34.
function ToInteger(x) {
if (%_IsSmi(x)) return x;
@@ -904,20 +741,6 @@ function ToLength(arg) {
}
-// ECMA-262, section 9.6, page 34.
-function ToUint32(x) {
- if (%_IsSmi(x) && x >= 0) return x;
- return %NumberToJSUint32(ToNumber(x));
-}
-
-
-// ECMA-262, section 9.5, page 34
-function ToInt32(x) {
- if (%_IsSmi(x)) return x;
- return %NumberToJSInt32(ToNumber(x));
-}
-
-
// ES5, section 9.12
function SameValue(x, y) {
if (typeof x != typeof y) return false;
@@ -928,6 +751,7 @@ function SameValue(x, y) {
return false;
}
}
+ if (IS_SIMD_VALUE(x)) return %SimdSameValue(x, y);
return x === y;
}
@@ -938,9 +762,11 @@ function SameValueZero(x, y) {
if (IS_NUMBER(x)) {
if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
}
+ if (IS_SIMD_VALUE(x)) return %SimdSameValueZero(x, y);
return x === y;
}
+
function ConcatIterableToArray(target, iterable) {
var index = target.length;
for (var element of iterable) {
@@ -976,18 +802,17 @@ function IsConcatSpreadable(O) {
// ECMA-262, section 8.6.2.6, page 28.
function DefaultNumber(x) {
- if (!IS_SYMBOL_WRAPPER(x)) {
- var valueOf = x.valueOf;
- if (IS_SPEC_FUNCTION(valueOf)) {
- var v = %_CallFunction(x, valueOf);
- if (IsPrimitive(v)) return v;
- }
-
- var toString = x.toString;
- if (IS_SPEC_FUNCTION(toString)) {
- var s = %_CallFunction(x, toString);
- if (IsPrimitive(s)) return s;
- }
+ var valueOf = x.valueOf;
+ if (IS_SPEC_FUNCTION(valueOf)) {
+ var v = %_CallFunction(x, valueOf);
+ if (IS_SYMBOL(v)) throw MakeTypeError(kSymbolToNumber);
+ if (IS_SIMD_VALUE(x)) throw MakeTypeError(kSimdToNumber);
+ if (IsPrimitive(v)) return v;
+ }
+ var toString = x.toString;
+ if (IS_SPEC_FUNCTION(toString)) {
+ var s = %_CallFunction(x, toString);
+ if (IsPrimitive(s)) return s;
}
throw MakeTypeError(kCannotConvertToPrimitive);
}
@@ -995,6 +820,7 @@ function DefaultNumber(x) {
// ECMA-262, section 8.6.2.6, page 28.
function DefaultString(x) {
if (!IS_SYMBOL_WRAPPER(x)) {
+ if (IS_SYMBOL(x)) throw MakeTypeError(kSymbolToString);
var toString = x.toString;
if (IS_SPEC_FUNCTION(toString)) {
var s = %_CallFunction(x, toString);
@@ -1025,7 +851,8 @@ function ToPositiveInteger(x, rangeErrorIndex) {
// boilerplate gets the right prototype.
%FunctionSetPrototype(GlobalArray, new GlobalArray(0));
-//----------------------------------------------------------------------------
+// ----------------------------------------------------------------------------
+// Exports
$concatIterableToArray = ConcatIterableToArray;
$defaultNumber = DefaultNumber;
@@ -1035,16 +862,69 @@ $nonNumberToNumber = NonNumberToNumber;
$nonStringToString = NonStringToString;
$sameValue = SameValue;
$sameValueZero = SameValueZero;
-$toBoolean = ToBoolean;
-$toInt32 = ToInt32;
$toInteger = ToInteger;
$toLength = ToLength;
$toName = ToName;
$toNumber = ToNumber;
-$toObject = ToObject;
$toPositiveInteger = ToPositiveInteger;
$toPrimitive = ToPrimitive;
$toString = ToString;
-$toUint32 = ToUint32;
+
+%InstallJSBuiltins({
+ EQUALS,
+ COMPARE,
+ COMPARE_STRONG,
+ ADD,
+ ADD_STRONG,
+ STRING_ADD_LEFT,
+ STRING_ADD_RIGHT,
+ SUB,
+ SUB_STRONG,
+ MUL,
+ MUL_STRONG,
+ DIV,
+ DIV_STRONG,
+ MOD,
+ MOD_STRONG,
+ BIT_OR,
+ BIT_OR_STRONG,
+ BIT_AND,
+ BIT_AND_STRONG,
+ BIT_XOR,
+ BIT_XOR_STRONG,
+ SHL,
+ SHL_STRONG,
+ SAR,
+ SAR_STRONG,
+ SHR,
+ SHR_STRONG,
+ IN,
+ INSTANCE_OF,
+ CALL_NON_FUNCTION,
+ CALL_NON_FUNCTION_AS_CONSTRUCTOR,
+ CALL_FUNCTION_PROXY,
+ CALL_FUNCTION_PROXY_AS_CONSTRUCTOR,
+ CONCAT_ITERABLE_TO_ARRAY,
+ APPLY_PREPARE,
+ REFLECT_APPLY_PREPARE,
+ REFLECT_CONSTRUCT_PREPARE,
+ STACK_OVERFLOW,
+ TO_NUMBER,
+ TO_STRING,
+ TO_NAME,
+});
+
+utils.ExportToRuntime(function(to) {
+ to.ToNumber = ToNumber;
+ to.ToString = ToString;
+ to.ToInteger = ToInteger;
+ to.ToLength = ToLength;
+});
+
+utils.Export(function(to) {
+ to.ToBoolean = ToBoolean;
+ to.ToNumber = ToNumber;
+ to.ToString = ToString;
+})
})
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index d00df71576..fa0d91bf23 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -2,12 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/conversions-inl.h"
#include "src/elements.h"
+#include "src/factory.h"
#include "src/messages.h"
-#include "src/runtime/runtime-utils.h"
+#include "src/prototype.h"
namespace v8 {
namespace internal {
@@ -133,7 +135,7 @@ class ArrayConcatVisitor {
~ArrayConcatVisitor() { clear_storage(); }
void visit(uint32_t i, Handle<Object> elm) {
- if (i > JSObject::kMaxElementCount - index_offset_) {
+ if (i >= JSObject::kMaxElementCount - index_offset_) {
set_exceeds_array_limit(true);
return;
}
@@ -154,8 +156,10 @@ class ArrayConcatVisitor {
DCHECK(!fast_elements());
Handle<SeededNumberDictionary> dict(
SeededNumberDictionary::cast(*storage_));
+ // The object holding this backing store has just been allocated, so
+ // it cannot yet be used as a prototype.
Handle<SeededNumberDictionary> result =
- SeededNumberDictionary::AtNumberPut(dict, index, elm);
+ SeededNumberDictionary::AtNumberPut(dict, index, elm, false);
if (!result.is_identical_to(dict)) {
// Dictionary needed to grow.
clear_storage();
@@ -207,8 +211,11 @@ class ArrayConcatVisitor {
HandleScope loop_scope(isolate_);
Handle<Object> element(current_storage->get(i), isolate_);
if (!element->IsTheHole()) {
+ // The object holding this backing store has just been allocated, so
+ // it cannot yet be used as a prototype.
Handle<SeededNumberDictionary> new_storage =
- SeededNumberDictionary::AtNumberPut(slow_storage, i, element);
+ SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
+ false);
if (!new_storage.is_identical_to(slow_storage)) {
slow_storage = loop_scope.CloseAndEscape(new_storage);
}
@@ -298,7 +305,6 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case EXTERNAL_##TYPE##_ELEMENTS: \
case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -418,7 +424,6 @@ static void CollectElementIndices(Handle<JSObject> object, uint32_t range,
}
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case TYPE##_ELEMENTS: \
- case EXTERNAL_##TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -439,12 +444,8 @@ static void CollectElementIndices(Handle<JSObject> object, uint32_t range,
}
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
- MaybeHandle<Object> length_obj =
- Object::GetProperty(object, isolate->factory()->length_string());
- double length_num = length_obj.ToHandleChecked()->Number();
- uint32_t length = static_cast<uint32_t>(DoubleToInt32(length_num));
ElementsAccessor* accessor = object->GetElementsAccessor();
- for (uint32_t i = 0; i < length; i++) {
+ for (uint32_t i = 0; i < range; i++) {
if (accessor->HasElement(object, i)) {
indices->Add(i);
}
@@ -472,9 +473,9 @@ static bool IterateElementsSlow(Isolate* isolate, Handle<JSObject> receiver,
if (!maybe.IsJust()) return false;
if (maybe.FromJust()) {
Handle<Object> element_value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_value,
- Runtime::GetElementOrCharAt(isolate, receiver, i), false);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_value,
+ Object::GetElement(isolate, receiver, i),
+ false);
visitor->visit(i, element_value);
}
}
@@ -610,15 +611,6 @@ static bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
}
break;
}
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS: {
- Handle<ExternalUint8ClampedArray> pixels(
- ExternalUint8ClampedArray::cast(receiver->elements()));
- for (uint32_t j = 0; j < length; j++) {
- Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
- visitor->visit(j, e);
- }
- break;
- }
case UINT8_CLAMPED_ELEMENTS: {
Handle<FixedUint8ClampedArray> pixels(
FixedUint8ClampedArray::cast(receiver->elements()));
@@ -628,81 +620,41 @@ static bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
}
break;
}
- case EXTERNAL_INT8_ELEMENTS: {
- IterateTypedArrayElements<ExternalInt8Array, int8_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
case INT8_ELEMENTS: {
IterateTypedArrayElements<FixedInt8Array, int8_t>(
isolate, receiver, true, true, visitor);
break;
}
- case EXTERNAL_UINT8_ELEMENTS: {
- IterateTypedArrayElements<ExternalUint8Array, uint8_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
case UINT8_ELEMENTS: {
IterateTypedArrayElements<FixedUint8Array, uint8_t>(
isolate, receiver, true, true, visitor);
break;
}
- case EXTERNAL_INT16_ELEMENTS: {
- IterateTypedArrayElements<ExternalInt16Array, int16_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
case INT16_ELEMENTS: {
IterateTypedArrayElements<FixedInt16Array, int16_t>(
isolate, receiver, true, true, visitor);
break;
}
- case EXTERNAL_UINT16_ELEMENTS: {
- IterateTypedArrayElements<ExternalUint16Array, uint16_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
case UINT16_ELEMENTS: {
IterateTypedArrayElements<FixedUint16Array, uint16_t>(
isolate, receiver, true, true, visitor);
break;
}
- case EXTERNAL_INT32_ELEMENTS: {
- IterateTypedArrayElements<ExternalInt32Array, int32_t>(
- isolate, receiver, true, false, visitor);
- break;
- }
case INT32_ELEMENTS: {
IterateTypedArrayElements<FixedInt32Array, int32_t>(
isolate, receiver, true, false, visitor);
break;
}
- case EXTERNAL_UINT32_ELEMENTS: {
- IterateTypedArrayElements<ExternalUint32Array, uint32_t>(
- isolate, receiver, true, false, visitor);
- break;
- }
case UINT32_ELEMENTS: {
IterateTypedArrayElements<FixedUint32Array, uint32_t>(
isolate, receiver, true, false, visitor);
break;
}
- case EXTERNAL_FLOAT32_ELEMENTS: {
- IterateTypedArrayElements<ExternalFloat32Array, float>(
- isolate, receiver, false, false, visitor);
- break;
- }
case FLOAT32_ELEMENTS: {
IterateTypedArrayElements<FixedFloat32Array, float>(
isolate, receiver, false, false, visitor);
break;
}
- case EXTERNAL_FLOAT64_ELEMENTS: {
- IterateTypedArrayElements<ExternalFloat64Array, double>(
- isolate, receiver, false, false, visitor);
- break;
- }
case FLOAT64_ELEMENTS: {
IterateTypedArrayElements<FixedFloat64Array, double>(
isolate, receiver, false, false, visitor);
@@ -1113,8 +1065,8 @@ static Object* ArrayConstructorCommon(Isolate* isolate,
allocation_site = site;
}
- array = Handle<JSArray>::cast(factory->NewJSObjectFromMap(
- initial_map, NOT_TENURED, true, allocation_site));
+ array = Handle<JSArray>::cast(
+ factory->NewJSObjectFromMap(initial_map, NOT_TENURED, allocation_site));
} else {
array = Handle<JSArray>::cast(factory->NewJSObject(constructor));
@@ -1233,8 +1185,7 @@ RUNTIME_FUNCTION(Runtime_NormalizeElements) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
- RUNTIME_ASSERT(!array->HasExternalArrayElements() &&
- !array->HasFixedTypedArrayElements() &&
+ RUNTIME_ASSERT(!array->HasFixedTypedArrayElements() &&
!array->IsJSGlobalProxy());
JSObject::NormalizeElements(array);
return *array;
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index c9b78769cd..9b9fa0b12d 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
-#include "src/conversions.h"
-#include "src/runtime/runtime-utils.h"
+#include "src/conversions-inl.h"
+#include "src/factory.h"
// Implement Atomic accesses to SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec, found here
@@ -103,69 +103,79 @@ inline void StoreSeqCst(uint64_t* p, uint64_t value) {
#elif V8_CC_MSVC
-#define _InterlockedCompareExchange32 _InterlockedCompareExchange
-#define _InterlockedExchange32 _InterlockedExchange
-#define _InterlockedExchangeAdd32 _InterlockedExchangeAdd
-#define _InterlockedAnd32 _InterlockedAnd
-#define _InterlockedOr32 _InterlockedOr
-#define _InterlockedXor32 _InterlockedXor
-
-#define INTEGER_TYPES(V) \
- V(int8_t, 8, char) \
- V(uint8_t, 8, char) \
- V(int16_t, 16, short) /* NOLINT(runtime/int) */ \
- V(uint16_t, 16, short) /* NOLINT(runtime/int) */ \
- V(int32_t, 32, long) /* NOLINT(runtime/int) */ \
- V(uint32_t, 32, long) /* NOLINT(runtime/int) */ \
- V(int64_t, 64, LONGLONG) \
- V(uint64_t, 64, LONGLONG)
-
-#define ATOMIC_OPS(type, suffix, vctype) \
- inline type CompareExchangeSeqCst(volatile type* p, type oldval, \
- type newval) { \
- return _InterlockedCompareExchange##suffix( \
- reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(newval), \
- bit_cast<vctype>(oldval)); \
- } \
- inline type LoadSeqCst(volatile type* p) { return *p; } \
- inline void StoreSeqCst(volatile type* p, type value) { \
- _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p), \
- bit_cast<vctype>(value)); \
- } \
- inline type AddSeqCst(volatile type* p, type value) { \
- return _InterlockedExchangeAdd##suffix( \
- reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(value)); \
- } \
- inline type SubSeqCst(volatile type* p, type value) { \
- return _InterlockedExchangeAdd##suffix( \
- reinterpret_cast<volatile vctype*>(p), -bit_cast<vctype>(value)); \
- } \
- inline type AndSeqCst(volatile type* p, type value) { \
- return _InterlockedAnd##suffix(reinterpret_cast<volatile vctype*>(p), \
- bit_cast<vctype>(value)); \
- } \
- inline type OrSeqCst(volatile type* p, type value) { \
- return _InterlockedOr##suffix(reinterpret_cast<volatile vctype*>(p), \
- bit_cast<vctype>(value)); \
- } \
- inline type XorSeqCst(volatile type* p, type value) { \
- return _InterlockedXor##suffix(reinterpret_cast<volatile vctype*>(p), \
- bit_cast<vctype>(value)); \
- } \
- inline type ExchangeSeqCst(volatile type* p, type value) { \
- return _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p), \
- bit_cast<vctype>(value)); \
+#define InterlockedCompareExchange32 _InterlockedCompareExchange
+#define InterlockedExchange32 _InterlockedExchange
+#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
+#define InterlockedAnd32 _InterlockedAnd
+#define InterlockedOr32 _InterlockedOr
+#define InterlockedXor32 _InterlockedXor
+#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
+#define InterlockedCompareExchange8 _InterlockedCompareExchange8
+#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
+
+#define ATOMIC_OPS_INTEGER(type, suffix, vctype) \
+ inline type AddSeqCst(type* p, type value) { \
+ return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ } \
+ inline type SubSeqCst(type* p, type value) { \
+ return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
+ -bit_cast<vctype>(value)); \
+ } \
+ inline type AndSeqCst(type* p, type value) { \
+ return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ } \
+ inline type OrSeqCst(type* p, type value) { \
+ return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ } \
+ inline type XorSeqCst(type* p, type value) { \
+ return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ } \
+ inline type ExchangeSeqCst(type* p, type value) { \
+ return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ }
+
+#define ATOMIC_OPS_FLOAT(type, suffix, vctype) \
+ inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \
+ return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
+ bit_cast<vctype>(newval), \
+ bit_cast<vctype>(oldval)); \
+ } \
+ inline type LoadSeqCst(type* p) { return *p; } \
+ inline void StoreSeqCst(type* p, type value) { \
+ InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
+ bit_cast<vctype>(value)); \
}
-INTEGER_TYPES(ATOMIC_OPS)
+
+#define ATOMIC_OPS(type, suffix, vctype) \
+ ATOMIC_OPS_INTEGER(type, suffix, vctype) \
+ ATOMIC_OPS_FLOAT(type, suffix, vctype)
+
+ATOMIC_OPS(int8_t, 8, char)
+ATOMIC_OPS(uint8_t, 8, char)
+ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */
+ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
+ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */
+ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
+ATOMIC_OPS_FLOAT(uint64_t, 64, LONGLONG)
+
+#undef ATOMIC_OPS_INTEGER
+#undef ATOMIC_OPS_FLOAT
#undef ATOMIC_OPS
-#undef INTEGER_TYPES
-#undef _InterlockedCompareExchange32
-#undef _InterlockedExchange32
-#undef _InterlockedExchangeAdd32
-#undef _InterlockedAnd32
-#undef _InterlockedOr32
-#undef _InterlockedXor32
+#undef InterlockedCompareExchange32
+#undef InterlockedExchange32
+#undef InterlockedExchangeAdd32
+#undef InterlockedAnd32
+#undef InterlockedOr32
+#undef InterlockedXor32
+#undef InterlockedExchangeAdd16
+#undef InterlockedCompareExchange8
+#undef InterlockedExchangeAdd8
#else
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 97a19c1bc7..8692b9b800 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/runtime/runtime-utils.h"
+
#include <stdlib.h>
#include <limits>
-#include "src/v8.h"
-
#include "src/arguments.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
+#include "src/frames-inl.h"
#include "src/messages.h"
#include "src/runtime/runtime.h"
-#include "src/runtime/runtime-utils.h"
-
namespace v8 {
namespace internal {
@@ -93,16 +92,10 @@ RUNTIME_FUNCTION(Runtime_HomeObjectSymbol) {
}
-RUNTIME_FUNCTION(Runtime_DefineClass) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 6);
- CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 2);
- CONVERT_ARG_HANDLE_CHECKED(Script, script, 3);
- CONVERT_SMI_ARG_CHECKED(start_position, 4);
- CONVERT_SMI_ARG_CHECKED(end_position, 5);
-
+static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
+ Handle<Object> super_class,
+ Handle<JSFunction> constructor,
+ int start_position, int end_position) {
Handle<Object> prototype_parent;
Handle<Object> constructor_parent;
@@ -113,31 +106,38 @@ RUNTIME_FUNCTION(Runtime_DefineClass) {
prototype_parent = isolate->factory()->null_value();
} else if (super_class->IsSpecFunction()) {
if (Handle<JSFunction>::cast(super_class)->shared()->is_generator()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
+ THROW_NEW_ERROR(
isolate,
- NewTypeError(MessageTemplate::kExtendsValueGenerator, super_class));
+ NewTypeError(MessageTemplate::kExtendsValueGenerator, super_class),
+ Object);
}
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ ASSIGN_RETURN_ON_EXCEPTION(
isolate, prototype_parent,
Runtime::GetObjectProperty(isolate, super_class,
isolate->factory()->prototype_string(),
- SLOPPY));
+ SLOPPY),
+ Object);
if (!prototype_parent->IsNull() && !prototype_parent->IsSpecObject()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
+ THROW_NEW_ERROR(
isolate, NewTypeError(MessageTemplate::kPrototypeParentNotAnObject,
- prototype_parent));
+ prototype_parent),
+ Object);
}
constructor_parent = super_class;
} else {
// TODO(arv): Should be IsConstructor.
- THROW_NEW_ERROR_RETURN_FAILURE(
+ THROW_NEW_ERROR(
isolate,
- NewTypeError(MessageTemplate::kExtendsValueNotFunction, super_class));
+ NewTypeError(MessageTemplate::kExtendsValueNotFunction, super_class),
+ Object);
}
}
Handle<Map> map =
isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ if (constructor->map()->is_strong()) {
+ map->set_is_strong();
+ }
Map::SetPrototype(map, prototype_parent);
map->SetConstructor(*constructor);
Handle<JSObject> prototype = isolate->factory()->NewJSObjectFromMap(map);
@@ -155,42 +155,81 @@ RUNTIME_FUNCTION(Runtime_DefineClass) {
JSFunction::SetPrototype(constructor, prototype);
PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- constructor, isolate->factory()->prototype_string(),
- prototype, attribs));
+ RETURN_ON_EXCEPTION(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ constructor, isolate->factory()->prototype_string(),
+ prototype, attribs),
+ Object);
// TODO(arv): Only do this conditionally.
Handle<Symbol> home_object_symbol(isolate->heap()->home_object_symbol());
- RETURN_FAILURE_ON_EXCEPTION(
+ RETURN_ON_EXCEPTION(
isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- constructor, home_object_symbol, prototype, DONT_ENUM));
+ constructor, home_object_symbol, prototype, DONT_ENUM),
+ Object);
if (!constructor_parent.is_null()) {
- RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- JSObject::SetPrototype(constructor, constructor_parent, false));
+ RETURN_ON_EXCEPTION(
+ isolate, JSObject::SetPrototype(constructor, constructor_parent, false),
+ Object);
}
JSObject::AddProperty(prototype, isolate->factory()->constructor_string(),
constructor, DONT_ENUM);
// Install private properties that are used to construct the FunctionToString.
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, Object::SetProperty(constructor,
- isolate->factory()->class_script_symbol(),
- script, STRICT));
- RETURN_FAILURE_ON_EXCEPTION(
+ RETURN_ON_EXCEPTION(
isolate,
Object::SetProperty(
constructor, isolate->factory()->class_start_position_symbol(),
- handle(Smi::FromInt(start_position), isolate), STRICT));
- RETURN_FAILURE_ON_EXCEPTION(
+ handle(Smi::FromInt(start_position), isolate), STRICT),
+ Object);
+ RETURN_ON_EXCEPTION(
isolate, Object::SetProperty(
constructor, isolate->factory()->class_end_position_symbol(),
- handle(Smi::FromInt(end_position), isolate), STRICT));
+ handle(Smi::FromInt(end_position), isolate), STRICT),
+ Object);
- return *constructor;
+ return constructor;
+}
+
+
+RUNTIME_FUNCTION(Runtime_DefineClass) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 5);
+ CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 2);
+ CONVERT_SMI_ARG_CHECKED(start_position, 3);
+ CONVERT_SMI_ARG_CHECKED(end_position, 4);
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, DefineClass(isolate, name, super_class, constructor,
+ start_position, end_position));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_DefineClassStrong) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 5);
+ CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 2);
+ CONVERT_SMI_ARG_CHECKED(start_position, 3);
+ CONVERT_SMI_ARG_CHECKED(end_position, 4);
+
+ if (super_class->IsNull()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kStrongExtendNull));
+ }
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, DefineClass(isolate, name, super_class, constructor,
+ start_position, end_position));
+ return *result;
}
@@ -208,37 +247,46 @@ RUNTIME_FUNCTION(Runtime_DefineClassMethod) {
}
+RUNTIME_FUNCTION(Runtime_FinalizeClassDefinition) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, constructor, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, prototype, 1);
+
+ JSObject::MigrateSlowToFast(prototype, 0, "RuntimeToFastProperties");
+ JSObject::MigrateSlowToFast(constructor, 0, "RuntimeToFastProperties");
+
+ if (constructor->map()->is_strong()) {
+ DCHECK(prototype->map()->is_strong());
+ RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::Freeze(prototype));
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::Freeze(constructor));
+ return *result;
+ }
+ return *constructor;
+}
+
+
RUNTIME_FUNCTION(Runtime_ClassGetSourceCode) {
HandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- Handle<Object> script;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, script,
- Object::GetProperty(fun, isolate->factory()->class_script_symbol()));
- if (!script->IsScript()) {
- return isolate->heap()->undefined_value();
- }
-
Handle<Symbol> start_position_symbol(
isolate->heap()->class_start_position_symbol());
- Handle<Object> start_position;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, start_position, Object::GetProperty(fun, start_position_symbol));
+ Handle<Object> start_position =
+ JSReceiver::GetDataProperty(fun, start_position_symbol);
+ if (!start_position->IsSmi()) return isolate->heap()->undefined_value();
Handle<Symbol> end_position_symbol(
isolate->heap()->class_end_position_symbol());
- Handle<Object> end_position;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, end_position, Object::GetProperty(fun, end_position_symbol));
-
- if (!start_position->IsSmi() || !end_position->IsSmi() ||
- !Handle<Script>::cast(script)->HasValidSource()) {
- return isolate->ThrowIllegalOperation();
- }
+ Handle<Object> end_position =
+ JSReceiver::GetDataProperty(fun, end_position_symbol);
+ CHECK(end_position->IsSmi());
- Handle<String> source(String::cast(Handle<Script>::cast(script)->source()));
+ Handle<String> source(
+ String::cast(Script::cast(fun->shared()->script())->source()));
return *isolate->factory()->NewSubString(
source, Handle<Smi>::cast(start_position)->value(),
Handle<Smi>::cast(end_position)->value());
@@ -476,14 +524,49 @@ RUNTIME_FUNCTION(Runtime_HandleStepInForDerivedConstructors) {
RUNTIME_FUNCTION(Runtime_DefaultConstructorCallSuper) {
- UNIMPLEMENTED();
- return nullptr;
-}
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, original_constructor, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, actual_constructor, 1);
+ JavaScriptFrameIterator it(isolate);
+ // Prepare the callee to the super call. The super constructor is stored as
+ // the prototype of the constructor we are currently executing.
+ Handle<Object> super_constructor;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, super_constructor,
+ Runtime::GetPrototype(isolate, actual_constructor));
+
+ // Find the frame that holds the actual arguments passed to the function.
+ it.AdvanceToArgumentsFrame();
+ JavaScriptFrame* frame = it.frame();
+
+ // Prepare the array containing all passed arguments.
+ int argument_count = frame->GetArgumentsLength();
+ Handle<FixedArray> elements =
+ isolate->factory()->NewUninitializedFixedArray(argument_count);
+ for (int i = 0; i < argument_count; ++i) {
+ elements->set(i, frame->GetParameter(i));
+ }
+ Handle<JSArray> arguments = isolate->factory()->NewJSArrayWithElements(
+ elements, FAST_ELEMENTS, argument_count);
+
+ // Call $reflectConstruct(<super>, <args>, <new.target>) now.
+ Handle<Object> reflect;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, reflect,
+ Object::GetProperty(isolate,
+ handle(isolate->native_context()->builtins()),
+ "$reflectConstruct"));
+ RUNTIME_ASSERT(reflect->IsJSFunction()); // Depends on --harmony-reflect.
+ Handle<Object> argv[] = {super_constructor, arguments, original_constructor};
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, reflect, isolate->factory()->undefined_value(),
+ arraysize(argv), argv));
-RUNTIME_FUNCTION(Runtime_CallSuperWithSpread) {
- UNIMPLEMENTED();
- return nullptr;
+ return *result;
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 1ba1e34356..3450fca0e2 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
-#include "src/arguments.h"
#include "src/runtime/runtime-utils.h"
+#include "src/arguments.h"
+#include "src/conversions-inl.h"
+#include "src/factory.h"
namespace v8 {
namespace internal {
@@ -299,7 +299,7 @@ RUNTIME_FUNCTION(Runtime_MapIteratorNext) {
void Runtime::WeakCollectionInitialize(
Isolate* isolate, Handle<JSWeakCollection> weak_collection) {
- DCHECK(weak_collection->map()->inobject_properties() == 0);
+ DCHECK_EQ(0, weak_collection->map()->GetInObjectProperties());
Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 0);
weak_collection->set_table(*table);
}
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 4cfa84ac44..e7f567f885 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
#include "src/compiler.h"
#include "src/deoptimizer.h"
-#include "src/frames.h"
-#include "src/full-codegen.h"
+#include "src/frames-inl.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/messages.h"
-#include "src/runtime/runtime-utils.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
@@ -35,8 +34,8 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
Handle<Code> code;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, code,
Compiler::GetLazyCode(function));
- DCHECK(code->kind() == Code::FUNCTION ||
- code->kind() == Code::OPTIMIZED_FUNCTION);
+ DCHECK(code->IsJavaScriptCode());
+
function->ReplaceCode(*code);
return *code;
}
@@ -132,9 +131,7 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
RUNTIME_ASSERT(frame->function()->IsJSFunction());
DCHECK(frame->function() == *function);
- // Avoid doing too much work when running with --always-opt and keep
- // the optimized code around.
- if (FLAG_always_opt || type == Deoptimizer::LAZY) {
+ if (type == Deoptimizer::LAZY) {
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-date.cc b/deps/v8/src/runtime/runtime-date.cc
index c47b158564..4231d82c34 100644
--- a/deps/v8/src/runtime/runtime-date.cc
+++ b/deps/v8/src/runtime/runtime-date.cc
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/conversions-inl.h"
#include "src/date.h"
#include "src/dateparser-inl.h"
+#include "src/factory.h"
#include "src/messages.h"
-#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index e7aaed1f6f..1cd524f17c 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
-#include "src/accessors.h"
#include "src/arguments.h"
-#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/deoptimizer.h"
-#include "src/parser.h"
+#include "src/debug/debug.h"
+#include "src/debug/debug-evaluate.h"
+#include "src/debug/debug-frames.h"
+#include "src/debug/debug-scopes.h"
+#include "src/frames-inl.h"
#include "src/runtime/runtime.h"
-#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
@@ -19,20 +18,19 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_DebugBreak) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
- isolate->debug()->HandleDebugBreak();
+ // Get the top-most JavaScript frame.
+ JavaScriptFrameIterator it(isolate);
+ isolate->debug()->Break(args, it.frame());
+ isolate->debug()->SetAfterBreakTarget(it.frame());
return isolate->heap()->undefined_value();
}
-// Helper functions for wrapping and unwrapping stack frame ids.
-static Smi* WrapFrameId(StackFrame::Id id) {
- DCHECK(IsAligned(OffsetFrom(id), static_cast<intptr_t>(4)));
- return Smi::FromInt(id >> 2);
-}
-
-
-static StackFrame::Id UnwrapFrameId(int wrapped) {
- return static_cast<StackFrame::Id>(wrapped << 2);
+RUNTIME_FUNCTION(Runtime_HandleDebuggerStatement) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 0);
+ isolate->debug()->HandleDebugBreak();
+ return isolate->heap()->undefined_value();
}
@@ -311,9 +309,8 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
if (name->AsArrayIndex(&index)) {
Handle<FixedArray> details = isolate->factory()->NewFixedArray(2);
Handle<Object> element_or_char;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, element_or_char,
- Runtime::GetElementOrCharAt(isolate, obj, index));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, element_or_char,
+ Object::GetElement(isolate, obj, index));
details->set(0, *element_or_char);
details->set(1, PropertyDetails::Empty().AsSmi());
return *isolate->factory()->NewJSArrayWithElements(details);
@@ -465,93 +462,6 @@ RUNTIME_FUNCTION(Runtime_GetFrameCount) {
}
-class FrameInspector {
- public:
- FrameInspector(JavaScriptFrame* frame, int inlined_jsframe_index,
- Isolate* isolate)
- : frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) {
- has_adapted_arguments_ = frame_->has_adapted_arguments();
- is_bottommost_ = inlined_jsframe_index == 0;
- is_optimized_ = frame_->is_optimized();
- // Calculate the deoptimized frame.
- if (frame->is_optimized()) {
- // TODO(turbofan): Revisit once we support deoptimization.
- if (frame->LookupCode()->is_turbofanned() &&
- frame->function()->shared()->asm_function() &&
- !FLAG_turbo_asm_deoptimization) {
- is_optimized_ = false;
- return;
- }
-
- deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame(
- frame, inlined_jsframe_index, isolate);
- }
- }
-
- ~FrameInspector() {
- // Get rid of the calculated deoptimized frame if any.
- if (deoptimized_frame_ != NULL) {
- Deoptimizer::DeleteDebuggerInspectableFrame(deoptimized_frame_, isolate_);
- }
- }
-
- int GetParametersCount() {
- return is_optimized_ ? deoptimized_frame_->parameters_count()
- : frame_->ComputeParametersCount();
- }
- int expression_count() { return deoptimized_frame_->expression_count(); }
- Object* GetFunction() {
- return is_optimized_ ? deoptimized_frame_->GetFunction()
- : frame_->function();
- }
- Object* GetParameter(int index) {
- return is_optimized_ ? deoptimized_frame_->GetParameter(index)
- : frame_->GetParameter(index);
- }
- Object* GetExpression(int index) {
- // TODO(turbofan): Revisit once we support deoptimization.
- if (frame_->LookupCode()->is_turbofanned() &&
- frame_->function()->shared()->asm_function() &&
- !FLAG_turbo_asm_deoptimization) {
- return isolate_->heap()->undefined_value();
- }
- return is_optimized_ ? deoptimized_frame_->GetExpression(index)
- : frame_->GetExpression(index);
- }
- int GetSourcePosition() {
- return is_optimized_ ? deoptimized_frame_->GetSourcePosition()
- : frame_->LookupCode()->SourcePosition(frame_->pc());
- }
- bool IsConstructor() {
- return is_optimized_ && !is_bottommost_
- ? deoptimized_frame_->HasConstructStub()
- : frame_->IsConstructor();
- }
- Object* GetContext() {
- return is_optimized_ ? deoptimized_frame_->GetContext() : frame_->context();
- }
-
- // To inspect all the provided arguments the frame might need to be
- // replaced with the arguments frame.
- void SetArgumentsFrame(JavaScriptFrame* frame) {
- DCHECK(has_adapted_arguments_);
- frame_ = frame;
- is_optimized_ = frame_->is_optimized();
- DCHECK(!is_optimized_);
- }
-
- private:
- JavaScriptFrame* frame_;
- DeoptimizedFrameInfo* deoptimized_frame_;
- Isolate* isolate_;
- bool is_optimized_;
- bool is_bottommost_;
- bool has_adapted_arguments_;
-
- DISALLOW_COPY_AND_ASSIGN(FrameInspector);
-};
-
-
static const int kFrameDetailsFrameIdIndex = 0;
static const int kFrameDetailsReceiverIndex = 1;
static const int kFrameDetailsFunctionIndex = 2;
@@ -564,34 +474,6 @@ static const int kFrameDetailsFlagsIndex = 8;
static const int kFrameDetailsFirstDynamicIndex = 9;
-static SaveContext* FindSavedContextForFrame(Isolate* isolate,
- JavaScriptFrame* frame) {
- SaveContext* save = isolate->save_context();
- while (save != NULL && !save->IsBelowFrame(frame)) {
- save = save->prev();
- }
- DCHECK(save != NULL);
- return save;
-}
-
-
-// Advances the iterator to the frame that matches the index and returns the
-// inlined frame index, or -1 if not found. Skips native JS functions.
-int Runtime::FindIndexedNonNativeFrame(JavaScriptFrameIterator* it, int index) {
- int count = -1;
- for (; !it->done(); it->Advance()) {
- List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- it->frame()->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0; i--) {
- // Omit functions from native and extension scripts.
- if (!frames[i].function()->IsSubjectToDebugging()) continue;
- if (++count == index) return i;
- }
- }
- return -1;
-}
-
-
// Return an array with frame details
// args[0]: number: break id
// args[1]: number: frame index
@@ -627,7 +509,8 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
JavaScriptFrameIterator it(isolate, id);
// Inlined frame index in optimized frame, starting from outer function.
- int inlined_jsframe_index = Runtime::FindIndexedNonNativeFrame(&it, index);
+ int inlined_jsframe_index =
+ DebugFrameHelper::FindIndexedNonNativeFrame(&it, index);
if (inlined_jsframe_index == -1) return heap->undefined_value();
FrameInspector frame_inspector(it.frame(), inlined_jsframe_index, isolate);
@@ -635,10 +518,12 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// Traverse the saved contexts chain to find the active context for the
// selected frame.
- SaveContext* save = FindSavedContextForFrame(isolate, it.frame());
+ SaveContext* save =
+ DebugFrameHelper::FindSavedContextForFrame(isolate, it.frame());
// Get the frame id.
- Handle<Object> frame_id(WrapFrameId(it.frame()->id()), isolate);
+ Handle<Object> frame_id(DebugFrameHelper::WrapFrameId(it.frame()->id()),
+ isolate);
// Find source position in unoptimized code.
int position = frame_inspector.GetSourcePosition();
@@ -855,932 +740,6 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
}
-static bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info,
- Handle<String> parameter_name) {
- VariableMode mode;
- VariableLocation location;
- InitializationFlag init_flag;
- MaybeAssignedFlag maybe_assigned_flag;
- return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &location,
- &init_flag, &maybe_assigned_flag) != -1;
-}
-
-
-static Handle<Context> MaterializeReceiver(Isolate* isolate,
- Handle<Context> target,
- Handle<JSFunction> function,
- JavaScriptFrame* frame) {
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
- Handle<Object> receiver;
- switch (scope_info->scope_type()) {
- case FUNCTION_SCOPE: {
- VariableMode mode;
- VariableLocation location;
- InitializationFlag init_flag;
- MaybeAssignedFlag maybe_assigned_flag;
-
- // Don't bother creating a fake context node if "this" is in the context
- // already.
- if (ScopeInfo::ContextSlotIndex(
- scope_info, isolate->factory()->this_string(), &mode, &location,
- &init_flag, &maybe_assigned_flag) >= 0) {
- return target;
- }
- receiver = handle(frame->receiver(), isolate);
- break;
- }
- case MODULE_SCOPE:
- receiver = isolate->factory()->undefined_value();
- break;
- case SCRIPT_SCOPE:
- receiver = handle(function->global_proxy(), isolate);
- break;
- default:
- // For eval code, arrow functions, and the like, there's no "this" binding
- // to materialize.
- return target;
- }
-
- return isolate->factory()->NewCatchContext(
- function, target, isolate->factory()->this_string(), receiver);
-}
-
-
-// Create a plain JSObject which materializes the local scope for the specified
-// frame.
-static void MaterializeStackLocalsWithFrameInspector(
- Isolate* isolate, Handle<JSObject> target, Handle<ScopeInfo> scope_info,
- FrameInspector* frame_inspector) {
- // First fill all parameters.
- for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- // Do not materialize the parameter if it is shadowed by a context local.
- Handle<String> name(scope_info->ParameterName(i));
- if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
-
- DCHECK_NOT_NULL(frame_inspector);
-
- HandleScope scope(isolate);
- Handle<Object> value(i < frame_inspector->GetParametersCount()
- ? frame_inspector->GetParameter(i)
- : isolate->heap()->undefined_value(),
- isolate);
- DCHECK(!value->IsTheHole());
-
- JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
- }
-
- // Second fill all stack locals.
- for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- if (scope_info->LocalIsSynthetic(i)) continue;
- Handle<String> name(scope_info->StackLocalName(i));
- Handle<Object> value(
- frame_inspector->GetExpression(scope_info->StackLocalIndex(i)),
- isolate);
- if (value->IsTheHole()) {
- value = isolate->factory()->undefined_value();
- }
-
- JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
- }
-}
-
-static void MaterializeStackLocalsWithFrameInspector(
- Isolate* isolate, Handle<JSObject> target, Handle<JSFunction> function,
- FrameInspector* frame_inspector) {
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
-
- MaterializeStackLocalsWithFrameInspector(isolate, target, scope_info,
- frame_inspector);
-}
-
-
-static void UpdateStackLocalsFromMaterializedObject(
- Isolate* isolate, Handle<JSObject> target, Handle<ScopeInfo> scope_info,
- JavaScriptFrame* frame, int inlined_jsframe_index) {
- if (inlined_jsframe_index != 0 || frame->is_optimized()) {
- // Optimized frames are not supported.
- // TODO(yangguo): make sure all code deoptimized when debugger is active
- // and assert that this cannot happen.
- return;
- }
-
- // Parameters.
- for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- // Shadowed parameters were not materialized.
- Handle<String> name(scope_info->ParameterName(i));
- if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
-
- DCHECK(!frame->GetParameter(i)->IsTheHole());
- HandleScope scope(isolate);
- Handle<Object> value =
- Object::GetPropertyOrElement(target, name).ToHandleChecked();
- frame->SetParameterValue(i, *value);
- }
-
- // Stack locals.
- for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- if (scope_info->LocalIsSynthetic(i)) continue;
- int index = scope_info->StackLocalIndex(i);
- if (frame->GetExpression(index)->IsTheHole()) continue;
- HandleScope scope(isolate);
- Handle<Object> value = Object::GetPropertyOrElement(
- target, handle(scope_info->StackLocalName(i),
- isolate)).ToHandleChecked();
- frame->SetExpression(index, *value);
- }
-}
-
-
-MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalContext(
- Isolate* isolate, Handle<JSObject> target, Handle<JSFunction> function,
- JavaScriptFrame* frame) {
- HandleScope scope(isolate);
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
-
- if (!scope_info->HasContext()) return target;
-
- // Third fill all context locals.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->declaration_context());
- ScopeInfo::CopyContextLocalsToScopeObject(scope_info, function_context,
- target);
-
- // Finally copy any properties from the function context extension.
- // These will be variables introduced by eval.
- if (function_context->closure() == *function) {
- if (function_context->has_extension() &&
- !function_context->IsNativeContext()) {
- Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- Handle<FixedArray> keys;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, keys, JSReceiver::GetKeys(ext, JSReceiver::INCLUDE_PROTOS),
- JSObject);
-
- for (int i = 0; i < keys->length(); i++) {
- // Names of variables introduced by eval are strings.
- DCHECK(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, value, Object::GetPropertyOrElement(ext, key), JSObject);
- RETURN_ON_EXCEPTION(isolate, Runtime::SetObjectProperty(
- isolate, target, key, value, SLOPPY),
- JSObject);
- }
- }
- }
-
- return target;
-}
-
-
-MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeScriptScope(
- Handle<GlobalObject> global) {
- Isolate* isolate = global->GetIsolate();
- Handle<ScriptContextTable> script_contexts(
- global->native_context()->script_context_table());
-
- Handle<JSObject> script_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
-
- for (int context_index = 0; context_index < script_contexts->used();
- context_index++) {
- Handle<Context> context =
- ScriptContextTable::GetContext(script_contexts, context_index);
- Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
- ScopeInfo::CopyContextLocalsToScopeObject(scope_info, context,
- script_scope);
- }
- return script_scope;
-}
-
-
-MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalScope(
- Isolate* isolate, JavaScriptFrame* frame, int inlined_jsframe_index) {
- FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
-
- Handle<JSObject> local_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
- MaterializeStackLocalsWithFrameInspector(isolate, local_scope, function,
- &frame_inspector);
-
- return MaterializeLocalContext(isolate, local_scope, function, frame);
-}
-
-
-// Set the context local variable value.
-static bool SetContextLocalValue(Isolate* isolate, Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
- Handle<String> next_name(scope_info->ContextLocalName(i));
- if (String::Equals(variable_name, next_name)) {
- VariableMode mode;
- VariableLocation location;
- InitializationFlag init_flag;
- MaybeAssignedFlag maybe_assigned_flag;
- int context_index =
- ScopeInfo::ContextSlotIndex(scope_info, next_name, &mode, &location,
- &init_flag, &maybe_assigned_flag);
- context->set(context_index, *new_value);
- return true;
- }
- }
-
- return false;
-}
-
-
-static bool SetLocalVariableValue(Isolate* isolate, JavaScriptFrame* frame,
- int inlined_jsframe_index,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- if (inlined_jsframe_index != 0 || frame->is_optimized()) {
- // Optimized frames are not supported.
- return false;
- }
-
- Handle<JSFunction> function(frame->function());
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
-
- bool default_result = false;
-
- // Parameters.
- for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- HandleScope scope(isolate);
- if (String::Equals(handle(scope_info->ParameterName(i)), variable_name)) {
- frame->SetParameterValue(i, *new_value);
- // Argument might be shadowed in heap context, don't stop here.
- default_result = true;
- }
- }
-
- // Stack locals.
- for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- HandleScope scope(isolate);
- if (String::Equals(handle(scope_info->StackLocalName(i)), variable_name)) {
- frame->SetExpression(scope_info->StackLocalIndex(i), *new_value);
- return true;
- }
- }
-
- if (scope_info->HasContext()) {
- // Context locals.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->declaration_context());
- if (SetContextLocalValue(isolate, scope_info, function_context,
- variable_name, new_value)) {
- return true;
- }
-
- // Function context extension. These are variables introduced by eval.
- if (function_context->closure() == *function) {
- if (function_context->has_extension() &&
- !function_context->IsNativeContext()) {
- Handle<JSObject> ext(JSObject::cast(function_context->extension()));
-
- Maybe<bool> maybe = JSReceiver::HasProperty(ext, variable_name);
- DCHECK(maybe.IsJust());
- if (maybe.FromJust()) {
- // We don't expect this to do anything except replacing
- // property value.
- Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
- SLOPPY).Assert();
- return true;
- }
- }
- }
- }
-
- return default_result;
-}
-
-
-static bool SetBlockVariableValue(Isolate* isolate,
- Handle<Context> block_context,
- Handle<ScopeInfo> scope_info,
- JavaScriptFrame* frame,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- if (frame != nullptr) {
- for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- HandleScope scope(isolate);
- if (String::Equals(handle(scope_info->StackLocalName(i)),
- variable_name)) {
- frame->SetExpression(scope_info->StackLocalIndex(i), *new_value);
- return true;
- }
- }
- }
- if (!block_context.is_null()) {
- return SetContextLocalValue(block_context->GetIsolate(), scope_info,
- block_context, variable_name, new_value);
- }
- return false;
-}
-
-
-// Create a plain JSObject which materializes the closure content for the
-// context.
-static Handle<JSObject> MaterializeClosure(Isolate* isolate,
- Handle<Context> context) {
- DCHECK(context->IsFunctionContext());
-
- Handle<SharedFunctionInfo> shared(context->closure()->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
-
- // Allocate and initialize a JSObject with all the content of this function
- // closure.
- Handle<JSObject> closure_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
-
- // Fill all context locals to the context extension.
- ScopeInfo::CopyContextLocalsToScopeObject(scope_info, context, closure_scope);
-
- // Finally copy any properties from the function context extension. This will
- // be variables introduced by eval.
- if (context->has_extension()) {
- Handle<JSObject> ext(JSObject::cast(context->extension()));
- DCHECK(ext->IsJSContextExtensionObject());
- Handle<FixedArray> keys =
- JSReceiver::GetKeys(ext, JSReceiver::OWN_ONLY).ToHandleChecked();
-
- for (int i = 0; i < keys->length(); i++) {
- HandleScope scope(isolate);
- // Names of variables introduced by eval are strings.
- DCHECK(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- Handle<Object> value = Object::GetProperty(ext, key).ToHandleChecked();
- JSObject::SetOwnPropertyIgnoreAttributes(closure_scope, key, value, NONE)
- .Check();
- }
- }
-
- return closure_scope;
-}
-
-
-// This method copies structure of MaterializeClosure method above.
-static bool SetClosureVariableValue(Isolate* isolate, Handle<Context> context,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- DCHECK(context->IsFunctionContext());
-
- Handle<SharedFunctionInfo> shared(context->closure()->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
-
- // Context locals to the context extension.
- if (SetContextLocalValue(isolate, scope_info, context, variable_name,
- new_value)) {
- return true;
- }
-
- // Properties from the function context extension. This will
- // be variables introduced by eval.
- if (context->has_extension()) {
- Handle<JSObject> ext(JSObject::cast(context->extension()));
- DCHECK(ext->IsJSContextExtensionObject());
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
- DCHECK(maybe.IsJust());
- if (maybe.FromJust()) {
- // We don't expect this to do anything except replacing property value.
- JSObject::SetOwnPropertyIgnoreAttributes(ext, variable_name, new_value,
- NONE).Check();
- return true;
- }
- }
-
- return false;
-}
-
-
-static bool SetScriptVariableValue(Handle<Context> context,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- Handle<ScriptContextTable> script_contexts(
- context->global_object()->native_context()->script_context_table());
- ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(script_contexts, variable_name,
- &lookup_result)) {
- Handle<Context> script_context = ScriptContextTable::GetContext(
- script_contexts, lookup_result.context_index);
- script_context->set(lookup_result.slot_index, *new_value);
- return true;
- }
-
- return false;
-}
-
-
-// Create a plain JSObject which materializes the scope for the specified
-// catch context.
-static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
- Handle<Context> context) {
- DCHECK(context->IsCatchContext());
- Handle<String> name(String::cast(context->extension()));
- Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX),
- isolate);
- Handle<JSObject> catch_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
- JSObject::SetOwnPropertyIgnoreAttributes(catch_scope, name, thrown_object,
- NONE).Check();
- return catch_scope;
-}
-
-
-static bool SetCatchVariableValue(Isolate* isolate, Handle<Context> context,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- DCHECK(context->IsCatchContext());
- Handle<String> name(String::cast(context->extension()));
- if (!String::Equals(name, variable_name)) {
- return false;
- }
- context->set(Context::THROWN_OBJECT_INDEX, *new_value);
- return true;
-}
-
-
-// Create a plain JSObject which materializes the block scope for the specified
-// block context.
-static Handle<JSObject> MaterializeBlockScope(Isolate* isolate,
- Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- JavaScriptFrame* frame,
- int inlined_jsframe_index) {
- Handle<JSObject> block_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
-
- if (frame != nullptr) {
- FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- MaterializeStackLocalsWithFrameInspector(isolate, block_scope, scope_info,
- &frame_inspector);
- }
-
- if (!context.is_null()) {
- Handle<ScopeInfo> scope_info_from_context(
- ScopeInfo::cast(context->extension()));
- // Fill all context locals.
- ScopeInfo::CopyContextLocalsToScopeObject(scope_info_from_context, context,
- block_scope);
- }
-
- return block_scope;
-}
-
-
-// Create a plain JSObject which materializes the module scope for the specified
-// module context.
-MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeModuleScope(
- Isolate* isolate, Handle<Context> context) {
- DCHECK(context->IsModuleContext());
- Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
-
- // Allocate and initialize a JSObject with all the members of the debugged
- // module.
- Handle<JSObject> module_scope =
- isolate->factory()->NewJSObject(isolate->object_function());
-
- // Fill all context locals.
- ScopeInfo::CopyContextLocalsToScopeObject(scope_info, context, module_scope);
-
- return module_scope;
-}
-
-
-// Iterate over the actual scopes visible from a stack frame or from a closure.
-// The iteration proceeds from the innermost visible nested scope outwards.
-// All scopes are backed by an actual context except the local scope,
-// which is inserted "artificially" in the context chain.
-class ScopeIterator {
- public:
- enum ScopeType {
- ScopeTypeGlobal = 0,
- ScopeTypeLocal,
- ScopeTypeWith,
- ScopeTypeClosure,
- ScopeTypeCatch,
- ScopeTypeBlock,
- ScopeTypeScript,
- ScopeTypeModule
- };
-
- ScopeIterator(Isolate* isolate, JavaScriptFrame* frame,
- int inlined_jsframe_index, bool ignore_nested_scopes = false)
- : isolate_(isolate),
- frame_(frame),
- inlined_jsframe_index_(inlined_jsframe_index),
- function_(frame->function()),
- context_(Context::cast(frame->context())),
- nested_scope_chain_(4),
- seen_script_scope_(false),
- failed_(false) {
- // Catch the case when the debugger stops in an internal function.
- Handle<SharedFunctionInfo> shared_info(function_->shared());
- Handle<ScopeInfo> scope_info(shared_info->scope_info());
- if (shared_info->script() == isolate->heap()->undefined_value()) {
- while (context_->closure() == *function_) {
- context_ = Handle<Context>(context_->previous(), isolate_);
- }
- return;
- }
-
- // Get the debug info (create it if it does not exist).
- if (!isolate->debug()->EnsureDebugInfo(shared_info, function_)) {
- // Return if ensuring debug info failed.
- return;
- }
-
- // Currently it takes too much time to find nested scopes due to script
- // parsing. Sometimes we want to run the ScopeIterator as fast as possible
- // (for example, while collecting async call stacks on every
- // addEventListener call), even if we drop some nested scopes.
- // Later we may optimize getting the nested scopes (cache the result?)
- // and include nested scopes into the "fast" iteration case as well.
- if (!ignore_nested_scopes) {
- Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared_info);
-
- // PC points to the instruction after the current one, possibly a break
- // location as well. So the "- 1" to exclude it from the search.
- Address call_pc = frame->pc() - 1;
-
- // Find the break point where execution has stopped.
- BreakLocation location =
- BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, call_pc);
-
- // Within the return sequence at the moment it is not possible to
- // get a source position which is consistent with the current scope chain.
- // Thus all nested with, catch and block contexts are skipped and we only
- // provide the function scope.
- ignore_nested_scopes = location.IsExit();
- }
-
- if (ignore_nested_scopes) {
- if (scope_info->HasContext()) {
- context_ = Handle<Context>(context_->declaration_context(), isolate_);
- } else {
- while (context_->closure() == *function_) {
- context_ = Handle<Context>(context_->previous(), isolate_);
- }
- }
- if (scope_info->scope_type() == FUNCTION_SCOPE ||
- scope_info->scope_type() == ARROW_SCOPE) {
- nested_scope_chain_.Add(scope_info);
- }
- } else {
- // Reparse the code and analyze the scopes.
- Handle<Script> script(Script::cast(shared_info->script()));
- Scope* scope = NULL;
-
- // Check whether we are in global, eval or function code.
- Handle<ScopeInfo> scope_info(shared_info->scope_info());
- Zone zone;
- if (scope_info->scope_type() != FUNCTION_SCOPE &&
- scope_info->scope_type() != ARROW_SCOPE) {
- // Global or eval code.
- ParseInfo info(&zone, script);
- if (scope_info->scope_type() == SCRIPT_SCOPE) {
- info.set_global();
- } else {
- DCHECK(scope_info->scope_type() == EVAL_SCOPE);
- info.set_eval();
- info.set_context(Handle<Context>(function_->context()));
- }
- if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
- scope = info.function()->scope();
- }
- RetrieveScopeChain(scope, shared_info);
- } else {
- // Function code
- ParseInfo info(&zone, function_);
- if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
- scope = info.function()->scope();
- }
- RetrieveScopeChain(scope, shared_info);
- }
- }
- }
-
- ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
- : isolate_(isolate),
- frame_(NULL),
- inlined_jsframe_index_(0),
- function_(function),
- context_(function->context()),
- seen_script_scope_(false),
- failed_(false) {
- if (function->IsBuiltin()) {
- context_ = Handle<Context>();
- }
- }
-
- // More scopes?
- bool Done() {
- DCHECK(!failed_);
- return context_.is_null();
- }
-
- bool Failed() { return failed_; }
-
- // Move to the next scope.
- void Next() {
- DCHECK(!failed_);
- ScopeType scope_type = Type();
- if (scope_type == ScopeTypeGlobal) {
- // The global scope is always the last in the chain.
- DCHECK(context_->IsNativeContext());
- context_ = Handle<Context>();
- return;
- }
- if (scope_type == ScopeTypeScript) {
- seen_script_scope_ = true;
- if (context_->IsScriptContext()) {
- context_ = Handle<Context>(context_->previous(), isolate_);
- }
- if (!nested_scope_chain_.is_empty()) {
- DCHECK_EQ(nested_scope_chain_.last()->scope_type(), SCRIPT_SCOPE);
- nested_scope_chain_.RemoveLast();
- DCHECK(nested_scope_chain_.is_empty());
- }
- CHECK(context_->IsNativeContext());
- return;
- }
- if (nested_scope_chain_.is_empty()) {
- context_ = Handle<Context>(context_->previous(), isolate_);
- } else {
- if (nested_scope_chain_.last()->HasContext()) {
- DCHECK(context_->previous() != NULL);
- context_ = Handle<Context>(context_->previous(), isolate_);
- }
- nested_scope_chain_.RemoveLast();
- }
- }
-
- // Return the type of the current scope.
- ScopeType Type() {
- DCHECK(!failed_);
- if (!nested_scope_chain_.is_empty()) {
- Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
- switch (scope_info->scope_type()) {
- case FUNCTION_SCOPE:
- case ARROW_SCOPE:
- DCHECK(context_->IsFunctionContext() || !scope_info->HasContext());
- return ScopeTypeLocal;
- case MODULE_SCOPE:
- DCHECK(context_->IsModuleContext());
- return ScopeTypeModule;
- case SCRIPT_SCOPE:
- DCHECK(context_->IsScriptContext() || context_->IsNativeContext());
- return ScopeTypeScript;
- case WITH_SCOPE:
- DCHECK(context_->IsWithContext());
- return ScopeTypeWith;
- case CATCH_SCOPE:
- DCHECK(context_->IsCatchContext());
- return ScopeTypeCatch;
- case BLOCK_SCOPE:
- DCHECK(!scope_info->HasContext() || context_->IsBlockContext());
- return ScopeTypeBlock;
- case EVAL_SCOPE:
- UNREACHABLE();
- }
- }
- if (context_->IsNativeContext()) {
- DCHECK(context_->global_object()->IsGlobalObject());
- // If we are at the native context and have not yet seen script scope,
- // fake it.
- return seen_script_scope_ ? ScopeTypeGlobal : ScopeTypeScript;
- }
- if (context_->IsFunctionContext()) {
- return ScopeTypeClosure;
- }
- if (context_->IsCatchContext()) {
- return ScopeTypeCatch;
- }
- if (context_->IsBlockContext()) {
- return ScopeTypeBlock;
- }
- if (context_->IsModuleContext()) {
- return ScopeTypeModule;
- }
- if (context_->IsScriptContext()) {
- return ScopeTypeScript;
- }
- DCHECK(context_->IsWithContext());
- return ScopeTypeWith;
- }
-
- // Return the JavaScript object with the content of the current scope.
- MaybeHandle<JSObject> ScopeObject() {
- DCHECK(!failed_);
- switch (Type()) {
- case ScopeIterator::ScopeTypeGlobal:
- return Handle<JSObject>(CurrentContext()->global_object());
- case ScopeIterator::ScopeTypeScript:
- return MaterializeScriptScope(
- Handle<GlobalObject>(CurrentContext()->global_object()));
- case ScopeIterator::ScopeTypeLocal:
- // Materialize the content of the local scope into a JSObject.
- DCHECK(nested_scope_chain_.length() == 1);
- return MaterializeLocalScope(isolate_, frame_, inlined_jsframe_index_);
- case ScopeIterator::ScopeTypeWith:
- // Return the with object.
- return Handle<JSObject>(JSObject::cast(CurrentContext()->extension()));
- case ScopeIterator::ScopeTypeCatch:
- return MaterializeCatchScope(isolate_, CurrentContext());
- case ScopeIterator::ScopeTypeClosure:
- // Materialize the content of the closure scope into a JSObject.
- return MaterializeClosure(isolate_, CurrentContext());
- case ScopeIterator::ScopeTypeBlock: {
- if (!nested_scope_chain_.is_empty()) {
- // this is a block scope on the stack.
- Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
- Handle<Context> context = scope_info->HasContext()
- ? CurrentContext()
- : Handle<Context>::null();
- return MaterializeBlockScope(isolate_, scope_info, context, frame_,
- inlined_jsframe_index_);
- } else {
- return MaterializeBlockScope(isolate_, Handle<ScopeInfo>::null(),
- CurrentContext(), nullptr, 0);
- }
- }
- case ScopeIterator::ScopeTypeModule:
- return MaterializeModuleScope(isolate_, CurrentContext());
- }
- UNREACHABLE();
- return Handle<JSObject>();
- }
-
- bool HasContext() {
- ScopeType type = Type();
- if (type == ScopeTypeBlock || type == ScopeTypeLocal) {
- if (!nested_scope_chain_.is_empty()) {
- return nested_scope_chain_.last()->HasContext();
- }
- }
- return true;
- }
-
- bool SetVariableValue(Handle<String> variable_name,
- Handle<Object> new_value) {
- DCHECK(!failed_);
- switch (Type()) {
- case ScopeIterator::ScopeTypeGlobal:
- break;
- case ScopeIterator::ScopeTypeLocal:
- return SetLocalVariableValue(isolate_, frame_, inlined_jsframe_index_,
- variable_name, new_value);
- case ScopeIterator::ScopeTypeWith:
- break;
- case ScopeIterator::ScopeTypeCatch:
- return SetCatchVariableValue(isolate_, CurrentContext(), variable_name,
- new_value);
- case ScopeIterator::ScopeTypeClosure:
- return SetClosureVariableValue(isolate_, CurrentContext(),
- variable_name, new_value);
- case ScopeIterator::ScopeTypeScript:
- return SetScriptVariableValue(CurrentContext(), variable_name,
- new_value);
- case ScopeIterator::ScopeTypeBlock:
- return SetBlockVariableValue(
- isolate_, HasContext() ? CurrentContext() : Handle<Context>::null(),
- CurrentScopeInfo(), frame_, variable_name, new_value);
- case ScopeIterator::ScopeTypeModule:
- // TODO(2399): should we implement it?
- break;
- }
- return false;
- }
-
- Handle<ScopeInfo> CurrentScopeInfo() {
- DCHECK(!failed_);
- if (!nested_scope_chain_.is_empty()) {
- return nested_scope_chain_.last();
- } else if (context_->IsBlockContext()) {
- return Handle<ScopeInfo>(ScopeInfo::cast(context_->extension()));
- } else if (context_->IsFunctionContext()) {
- return Handle<ScopeInfo>(context_->closure()->shared()->scope_info());
- }
- return Handle<ScopeInfo>::null();
- }
-
- // Return the context for this scope. For the local context there might not
- // be an actual context.
- Handle<Context> CurrentContext() {
- DCHECK(!failed_);
- if (Type() == ScopeTypeGlobal || Type() == ScopeTypeScript ||
- nested_scope_chain_.is_empty()) {
- return context_;
- } else if (nested_scope_chain_.last()->HasContext()) {
- return context_;
- } else {
- return Handle<Context>();
- }
- }
-
-#ifdef DEBUG
- // Debug print of the content of the current scope.
- void DebugPrint() {
- OFStream os(stdout);
- DCHECK(!failed_);
- switch (Type()) {
- case ScopeIterator::ScopeTypeGlobal:
- os << "Global:\n";
- CurrentContext()->Print(os);
- break;
-
- case ScopeIterator::ScopeTypeLocal: {
- os << "Local:\n";
- function_->shared()->scope_info()->Print();
- if (!CurrentContext().is_null()) {
- CurrentContext()->Print(os);
- if (CurrentContext()->has_extension()) {
- Handle<Object> extension(CurrentContext()->extension(), isolate_);
- if (extension->IsJSContextExtensionObject()) {
- extension->Print(os);
- }
- }
- }
- break;
- }
-
- case ScopeIterator::ScopeTypeWith:
- os << "With:\n";
- CurrentContext()->extension()->Print(os);
- break;
-
- case ScopeIterator::ScopeTypeCatch:
- os << "Catch:\n";
- CurrentContext()->extension()->Print(os);
- CurrentContext()->get(Context::THROWN_OBJECT_INDEX)->Print(os);
- break;
-
- case ScopeIterator::ScopeTypeClosure:
- os << "Closure:\n";
- CurrentContext()->Print(os);
- if (CurrentContext()->has_extension()) {
- Handle<Object> extension(CurrentContext()->extension(), isolate_);
- if (extension->IsJSContextExtensionObject()) {
- extension->Print(os);
- }
- }
- break;
-
- case ScopeIterator::ScopeTypeScript:
- os << "Script:\n";
- CurrentContext()
- ->global_object()
- ->native_context()
- ->script_context_table()
- ->Print(os);
- break;
-
- default:
- UNREACHABLE();
- }
- PrintF("\n");
- }
-#endif
-
- private:
- Isolate* isolate_;
- JavaScriptFrame* frame_;
- int inlined_jsframe_index_;
- Handle<JSFunction> function_;
- Handle<Context> context_;
- List<Handle<ScopeInfo> > nested_scope_chain_;
- bool seen_script_scope_;
- bool failed_;
-
- void RetrieveScopeChain(Scope* scope,
- Handle<SharedFunctionInfo> shared_info) {
- if (scope != NULL) {
- int source_position = shared_info->code()->SourcePosition(frame_->pc());
- scope->GetNestedScopeChain(isolate_, &nested_scope_chain_,
- source_position);
- } else {
- // A failed reparse indicates that the preparser has diverged from the
- // parser or that the preparse data given to the initial parse has been
- // faulty. We fail in debug mode but in release mode we only provide the
- // information we get from the context chain but nothing about
- // completely stack allocated scopes or stack allocated locals.
- // Or it could be due to stack overflow.
- DCHECK(isolate_->has_pending_exception());
- failed_ = true;
- }
- }
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
-};
-
-
RUNTIME_FUNCTION(Runtime_GetScopeCount) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -1790,13 +749,14 @@ RUNTIME_FUNCTION(Runtime_GetScopeCount) {
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
// Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
JavaScriptFrameIterator it(isolate, id);
JavaScriptFrame* frame = it.frame();
+ FrameInspector frame_inspector(frame, 0, isolate);
// Count the visible scopes.
int n = 0;
- for (ScopeIterator it(isolate, frame, 0); !it.Done(); it.Next()) {
+ for (ScopeIterator it(isolate, &frame_inspector); !it.Done(); it.Next()) {
n++;
}
@@ -1816,85 +776,18 @@ RUNTIME_FUNCTION(Runtime_GetStepInPositions) {
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
// Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
JavaScriptFrameIterator frame_it(isolate, id);
RUNTIME_ASSERT(!frame_it.done());
- List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- frame_it.frame()->Summarize(&frames);
- FrameSummary summary = frames.first();
-
- Handle<JSFunction> fun = Handle<JSFunction>(summary.function());
- Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>(fun->shared());
-
- if (!isolate->debug()->EnsureDebugInfo(shared, fun)) {
- return isolate->heap()->undefined_value();
- }
-
- Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared);
-
- // Find range of break points starting from the break point where execution
- // has stopped.
- Address call_pc = summary.pc() - 1;
- List<BreakLocation> locations;
- BreakLocation::FromAddressSameStatement(debug_info, ALL_BREAK_LOCATIONS,
- call_pc, &locations);
-
- Handle<JSArray> array = isolate->factory()->NewJSArray(locations.length());
-
- int index = 0;
- for (BreakLocation location : locations) {
- bool accept;
- if (location.pc() > summary.pc()) {
- accept = true;
- } else {
- StackFrame::Id break_frame_id = isolate->debug()->break_frame_id();
- // The break point is near our pc. Could be a step-in possibility,
- // that is currently taken by active debugger call.
- if (break_frame_id == StackFrame::NO_ID) {
- // We are not stepping.
- accept = false;
- } else {
- JavaScriptFrameIterator additional_frame_it(isolate, break_frame_id);
- // If our frame is a top frame and we are stepping, we can do step-in
- // at this place.
- accept = additional_frame_it.frame()->id() == id;
- }
- }
- if (accept) {
- if (location.IsStepInLocation()) {
- Smi* position_value = Smi::FromInt(location.position());
- RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- JSObject::SetElement(array, index, handle(position_value, isolate),
- SLOPPY));
- index++;
- }
- }
+ List<int> positions;
+ isolate->debug()->GetStepinPositions(frame_it.frame(), id, &positions);
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> array = factory->NewFixedArray(positions.length());
+ for (int i = 0; i < positions.length(); ++i) {
+ array->set(i, Smi::FromInt(positions[i]));
}
- return *array;
-}
-
-
-static const int kScopeDetailsTypeIndex = 0;
-static const int kScopeDetailsObjectIndex = 1;
-static const int kScopeDetailsSize = 2;
-
-
-MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeScopeDetails(
- Isolate* isolate, ScopeIterator* it) {
- // Calculate the size of the result.
- int details_size = kScopeDetailsSize;
- Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
-
- // Fill in scope details.
- details->set(kScopeDetailsTypeIndex, Smi::FromInt(it->Type()));
- Handle<JSObject> scope_object;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, scope_object, it->ScopeObject(),
- JSObject);
- details->set(kScopeDetailsObjectIndex, *scope_object);
-
- return isolate->factory()->NewJSArrayWithElements(details);
+ return *factory->NewJSArrayWithElements(array, FAST_SMI_ELEMENTS);
}
@@ -1918,13 +811,14 @@ RUNTIME_FUNCTION(Runtime_GetScopeDetails) {
CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
// Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
JavaScriptFrameIterator frame_it(isolate, id);
JavaScriptFrame* frame = frame_it.frame();
+ FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
// Find the requested scope.
int n = 0;
- ScopeIterator it(isolate, frame, inlined_jsframe_index);
+ ScopeIterator it(isolate, &frame_inspector);
for (; !it.Done() && n < index; it.Next()) {
n++;
}
@@ -1933,7 +827,7 @@ RUNTIME_FUNCTION(Runtime_GetScopeDetails) {
}
Handle<JSObject> details;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
- MaterializeScopeDetails(isolate, &it));
+ it.MaterializeScopeDetails());
return *details;
}
@@ -1963,16 +857,17 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
}
// Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
JavaScriptFrameIterator frame_it(isolate, id);
JavaScriptFrame* frame = frame_it.frame();
+ FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
List<Handle<JSObject> > result(4);
- ScopeIterator it(isolate, frame, inlined_jsframe_index, ignore_nested_scopes);
+ ScopeIterator it(isolate, &frame_inspector, ignore_nested_scopes);
for (; !it.Done(); it.Next()) {
Handle<JSObject> details;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
- MaterializeScopeDetails(isolate, &it));
+ it.MaterializeScopeDetails());
result.Add(details);
}
@@ -2021,7 +916,7 @@ RUNTIME_FUNCTION(Runtime_GetFunctionScopeDetails) {
Handle<JSObject> details;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
- MaterializeScopeDetails(isolate, &it));
+ it.MaterializeScopeDetails());
return *details;
}
@@ -2066,11 +961,12 @@ RUNTIME_FUNCTION(Runtime_SetScopeVariableValue) {
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
// Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
JavaScriptFrameIterator frame_it(isolate, id);
JavaScriptFrame* frame = frame_it.frame();
+ FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- ScopeIterator it(isolate, frame, inlined_jsframe_index);
+ ScopeIterator it(isolate, &frame_inspector);
res = SetScopeVariableValue(&it, index, variable_name, new_value);
} else {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
@@ -2090,7 +986,9 @@ RUNTIME_FUNCTION(Runtime_DebugPrintScopes) {
// Print the scopes for the top frame.
StackFrameLocator locator(isolate);
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- for (ScopeIterator it(isolate, frame, 0); !it.Done(); it.Next()) {
+ FrameInspector frame_inspector(frame, 0, isolate);
+
+ for (ScopeIterator it(isolate, &frame_inspector); !it.Done(); it.Next()) {
it.DebugPrint();
}
#endif
@@ -2336,7 +1234,7 @@ RUNTIME_FUNCTION(Runtime_PrepareStep) {
if (wrapped_frame_id == 0) {
frame_id = StackFrame::NO_ID;
} else {
- frame_id = UnwrapFrameId(wrapped_frame_id);
+ frame_id = DebugFrameHelper::UnwrapFrameId(wrapped_frame_id);
}
// Get the step action and check validity.
@@ -2378,254 +1276,6 @@ RUNTIME_FUNCTION(Runtime_ClearStepping) {
}
-// Helper function to find or create the arguments object for
-// Runtime_DebugEvaluate.
-static void MaterializeArgumentsObject(Isolate* isolate,
- Handle<JSObject> target,
- Handle<JSFunction> function) {
- // Do not materialize the arguments object for eval or top-level code.
- // Skip if "arguments" is already taken.
- if (!function->shared()->is_function()) return;
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(
- target, isolate->factory()->arguments_string());
- DCHECK(maybe.IsJust());
- if (maybe.FromJust()) return;
-
- // FunctionGetArguments can't throw an exception.
- Handle<JSObject> arguments =
- Handle<JSObject>::cast(Accessors::FunctionGetArguments(function));
- Handle<String> arguments_str = isolate->factory()->arguments_string();
- JSObject::SetOwnPropertyIgnoreAttributes(target, arguments_str, arguments,
- NONE).Check();
-}
-
-
-// Compile and evaluate source for the given context.
-static MaybeHandle<Object> DebugEvaluate(Isolate* isolate,
- Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context,
- Handle<Object> context_extension,
- Handle<Object> receiver,
- Handle<String> source) {
- if (context_extension->IsJSObject()) {
- Handle<JSObject> extension = Handle<JSObject>::cast(context_extension);
- Handle<JSFunction> closure(context->closure(), isolate);
- context = isolate->factory()->NewWithContext(closure, context, extension);
- }
-
- Handle<JSFunction> eval_fun;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, eval_fun,
- Compiler::GetFunctionFromEval(
- source, outer_info, context, SLOPPY,
- NO_PARSE_RESTRICTION, RelocInfo::kNoPosition),
- Object);
-
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, Execution::Call(isolate, eval_fun, receiver, 0, NULL),
- Object);
-
- // Skip the global proxy as it has no properties and always delegates to the
- // real global object.
- if (result->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, result);
- // TODO(verwaest): This will crash when the global proxy is detached.
- result = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
- }
-
- return result;
-}
-
-
-static Handle<JSObject> NewJSObjectWithNullProto(Isolate* isolate) {
- Handle<JSObject> result =
- isolate->factory()->NewJSObject(isolate->object_function());
- Handle<Map> new_map =
- Map::Copy(Handle<Map>(result->map()), "ObjectWithNullProto");
- Map::SetPrototype(new_map, isolate->factory()->null_value());
- JSObject::MigrateToMap(result, new_map);
- return result;
-}
-
-
-namespace {
-
-// This class builds a context chain for evaluation of expressions
-// in debugger.
-// The scope chain leading up to a breakpoint where evaluation occurs
-// looks like:
-// - [a mix of with, catch and block scopes]
-// - [function stack + context]
-// - [outer context]
-// The builder materializes all stack variables into properties of objects;
-// the expression is then evaluated as if it is inside a series of 'with'
-// statements using those objects. To this end, the builder builds a new
-// context chain, based on a scope chain:
-// - every With and Catch scope begets a cloned context
-// - Block scope begets one or two contexts:
-// - if a block has context-allocated varaibles, its context is cloned
-// - stack locals are materizalized as a With context
-// - Local scope begets a With context for materizalized locals, chained to
-// original function context. Original function context is the end of
-// the chain.
-class EvaluationContextBuilder {
- public:
- EvaluationContextBuilder(Isolate* isolate, JavaScriptFrame* frame,
- int inlined_jsframe_index)
- : isolate_(isolate),
- frame_(frame),
- inlined_jsframe_index_(inlined_jsframe_index) {
- FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- Handle<JSFunction> function =
- handle(JSFunction::cast(frame_inspector.GetFunction()));
- Handle<Context> outer_context = handle(function->context(), isolate);
- outer_info_ = handle(function->shared());
- Handle<Context> inner_context;
-
- bool stop = false;
- for (ScopeIterator it(isolate, frame, inlined_jsframe_index);
- !it.Failed() && !it.Done() && !stop; it.Next()) {
- ScopeIterator::ScopeType scope_type = it.Type();
-
- if (scope_type == ScopeIterator::ScopeTypeLocal) {
- Handle<Context> parent_context =
- it.HasContext() ? it.CurrentContext() : outer_context;
-
- // The "this" binding, if any, can't be bound via "with". If we need
- // to, add another node onto the outer context to bind "this".
- parent_context =
- MaterializeReceiver(isolate, parent_context, function, frame);
-
- Handle<JSObject> materialized_function =
- NewJSObjectWithNullProto(isolate);
-
- MaterializeStackLocalsWithFrameInspector(isolate, materialized_function,
- function, &frame_inspector);
-
- MaterializeArgumentsObject(isolate, materialized_function, function);
-
- Handle<Context> with_context = isolate->factory()->NewWithContext(
- function, parent_context, materialized_function);
-
- ContextChainElement context_chain_element;
- context_chain_element.original_context = it.CurrentContext();
- context_chain_element.materialized_object = materialized_function;
- context_chain_element.scope_info = it.CurrentScopeInfo();
- context_chain_.Add(context_chain_element);
-
- stop = true;
- RecordContextsInChain(&inner_context, with_context, with_context);
- } else if (scope_type == ScopeIterator::ScopeTypeCatch ||
- scope_type == ScopeIterator::ScopeTypeWith) {
- Handle<Context> cloned_context =
- Handle<Context>::cast(FixedArray::CopySize(
- it.CurrentContext(), it.CurrentContext()->length()));
-
- ContextChainElement context_chain_element;
- context_chain_element.original_context = it.CurrentContext();
- context_chain_element.cloned_context = cloned_context;
- context_chain_.Add(context_chain_element);
-
- RecordContextsInChain(&inner_context, cloned_context, cloned_context);
- } else if (scope_type == ScopeIterator::ScopeTypeBlock) {
- Handle<JSObject> materialized_object =
- NewJSObjectWithNullProto(isolate);
- MaterializeStackLocalsWithFrameInspector(isolate, materialized_object,
- it.CurrentScopeInfo(),
- &frame_inspector);
- if (it.HasContext()) {
- Handle<Context> cloned_context =
- Handle<Context>::cast(FixedArray::CopySize(
- it.CurrentContext(), it.CurrentContext()->length()));
- Handle<Context> with_context = isolate->factory()->NewWithContext(
- function, cloned_context, materialized_object);
-
- ContextChainElement context_chain_element;
- context_chain_element.original_context = it.CurrentContext();
- context_chain_element.cloned_context = cloned_context;
- context_chain_element.materialized_object = materialized_object;
- context_chain_element.scope_info = it.CurrentScopeInfo();
- context_chain_.Add(context_chain_element);
-
- RecordContextsInChain(&inner_context, cloned_context, with_context);
- } else {
- Handle<Context> with_context = isolate->factory()->NewWithContext(
- function, outer_context, materialized_object);
-
- ContextChainElement context_chain_element;
- context_chain_element.materialized_object = materialized_object;
- context_chain_element.scope_info = it.CurrentScopeInfo();
- context_chain_.Add(context_chain_element);
-
- RecordContextsInChain(&inner_context, with_context, with_context);
- }
- } else {
- stop = true;
- }
- }
- if (innermost_context_.is_null()) {
- innermost_context_ = outer_context;
- }
- DCHECK(!innermost_context_.is_null());
- }
-
- void UpdateVariables() {
- for (int i = 0; i < context_chain_.length(); i++) {
- ContextChainElement element = context_chain_[i];
- if (!element.original_context.is_null() &&
- !element.cloned_context.is_null()) {
- Handle<Context> cloned_context = element.cloned_context;
- cloned_context->CopyTo(
- Context::MIN_CONTEXT_SLOTS, *element.original_context,
- Context::MIN_CONTEXT_SLOTS,
- cloned_context->length() - Context::MIN_CONTEXT_SLOTS);
- }
- if (!element.materialized_object.is_null()) {
- // Write back potential changes to materialized stack locals to the
- // stack.
- UpdateStackLocalsFromMaterializedObject(
- isolate_, element.materialized_object, element.scope_info, frame_,
- inlined_jsframe_index_);
- }
- }
- }
-
- Handle<Context> innermost_context() const { return innermost_context_; }
- Handle<SharedFunctionInfo> outer_info() const { return outer_info_; }
-
- private:
- struct ContextChainElement {
- Handle<Context> original_context;
- Handle<Context> cloned_context;
- Handle<JSObject> materialized_object;
- Handle<ScopeInfo> scope_info;
- };
-
- void RecordContextsInChain(Handle<Context>* inner_context,
- Handle<Context> first, Handle<Context> last) {
- if (!inner_context->is_null()) {
- (*inner_context)->set_previous(*last);
- } else {
- innermost_context_ = last;
- }
- *inner_context = first;
- }
-
- Handle<SharedFunctionInfo> outer_info_;
- Handle<Context> innermost_context_;
- List<ContextChainElement> context_chain_;
- Isolate* isolate_;
- JavaScriptFrame* frame_;
- int inlined_jsframe_index_;
-};
-}
-
-
-// Evaluate a piece of JavaScript in the context of a stack frame for
-// debugging. Things that need special attention are:
-// - Parameters and stack-allocated locals need to be materialized. Altered
-// values need to be written back to the stack afterwards.
-// - The arguments object needs to materialized.
RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
HandleScope scope(isolate);
@@ -2641,50 +1291,17 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4);
CONVERT_ARG_HANDLE_CHECKED(Object, context_extension, 5);
- // Handle the processing of break.
- DisableBreak disable_break_scope(isolate->debug(), disable_break);
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator it(isolate, id);
- JavaScriptFrame* frame = it.frame();
-
- // Traverse the saved contexts chain to find the active context for the
- // selected frame.
- SaveContext* save = FindSavedContextForFrame(isolate, frame);
-
- SaveContext savex(isolate);
- isolate->set_context(*(save->context()));
-
- // Materialize stack locals and the arguments object.
-
- EvaluationContextBuilder context_builder(isolate, frame,
- inlined_jsframe_index);
- if (isolate->has_pending_exception()) {
- return isolate->heap()->exception();
- }
-
-
- Handle<Object> receiver(frame->receiver(), isolate);
- MaybeHandle<Object> maybe_result = DebugEvaluate(
- isolate, context_builder.outer_info(),
- context_builder.innermost_context(), context_extension, receiver, source);
+ StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, maybe_result);
- context_builder.UpdateVariables();
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ DebugEvaluate::Local(isolate, id, inlined_jsframe_index, source,
+ disable_break, context_extension));
return *result;
}
-static inline bool IsDebugContext(Isolate* isolate, Context* context) {
- // Try to unwrap script context if it exist.
- if (context->IsScriptContext()) context = context->previous();
- DCHECK_NOT_NULL(context);
- return context == *isolate->debug()->debug_context();
-}
-
-
RUNTIME_FUNCTION(Runtime_DebugEvaluateGlobal) {
HandleScope scope(isolate);
@@ -2698,28 +1315,10 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluateGlobal) {
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, context_extension, 3);
- // Handle the processing of break.
- DisableBreak disable_break_scope(isolate->debug(), disable_break);
-
- // Enter the top context from before the debugger was invoked.
- SaveContext save(isolate);
- SaveContext* top = &save;
- while (top != NULL && IsDebugContext(isolate, *top->context())) {
- top = top->prev();
- }
- if (top != NULL) {
- isolate->set_context(*top->context());
- }
-
- // Get the native context now set to the top context from before the
- // debugger was invoked.
- Handle<Context> context = isolate->native_context();
- Handle<JSObject> receiver(context->global_proxy());
- Handle<SharedFunctionInfo> outer_info(context->closure()->shared(), isolate);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, DebugEvaluate(isolate, outer_info, context,
- context_extension, receiver, source));
+ isolate, result,
+ DebugEvaluate::Global(isolate, source, disable_break, context_extension));
return *result;
}
@@ -2760,71 +1359,6 @@ RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
}
-// Helper function used by Runtime_DebugReferencedBy below.
-static int DebugReferencedBy(HeapIterator* iterator, JSObject* target,
- Object* instance_filter, int max_references,
- FixedArray* instances, int instances_size,
- JSFunction* arguments_function) {
- Isolate* isolate = target->GetIsolate();
- SealHandleScope shs(isolate);
- DisallowHeapAllocation no_allocation;
-
- // Iterate the heap.
- int count = 0;
- JSObject* last = NULL;
- HeapObject* heap_obj = NULL;
- while (((heap_obj = iterator->next()) != NULL) &&
- (max_references == 0 || count < max_references)) {
- // Only look at all JSObjects.
- if (heap_obj->IsJSObject()) {
- // Skip context extension objects and argument arrays as these are
- // checked in the context of functions using them.
- JSObject* obj = JSObject::cast(heap_obj);
- if (obj->IsJSContextExtensionObject() ||
- obj->map()->GetConstructor() == arguments_function) {
- continue;
- }
-
- // Check if the JS object has a reference to the object looked for.
- if (obj->ReferencesObject(target)) {
- // Check instance filter if supplied. This is normally used to avoid
- // references from mirror objects (see Runtime_IsInPrototypeChain).
- if (!instance_filter->IsUndefined()) {
- for (PrototypeIterator iter(isolate, obj); !iter.IsAtEnd();
- iter.Advance()) {
- if (iter.GetCurrent() == instance_filter) {
- obj = NULL; // Don't add this object.
- break;
- }
- }
- }
-
- if (obj != NULL) {
- // Valid reference found add to instance array if supplied an update
- // count.
- if (instances != NULL && count < instances_size) {
- instances->set(count, obj);
- }
- last = obj;
- count++;
- }
- }
- }
- }
-
- // Check for circular reference only. This can happen when the object is only
- // referenced from mirrors and has a circular reference in which case the
- // object is not really alive and would have been garbage collected if not
- // referenced from the mirror.
- if (count == 1 && last == target) {
- count = 0;
- }
-
- // Return the number of referencing objects found.
- return count;
-}
-
-
// Scan the heap for objects with direct references to an object
// args[0]: the object to find references to
// args[1]: constructor function for instances to exclude (Mirror)
@@ -2832,79 +1366,54 @@ static int DebugReferencedBy(HeapIterator* iterator, JSObject* target,
RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
-
- // Check parameters.
CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, instance_filter, 1);
- RUNTIME_ASSERT(instance_filter->IsUndefined() ||
- instance_filter->IsJSObject());
+ CONVERT_ARG_HANDLE_CHECKED(Object, filter, 1);
+ RUNTIME_ASSERT(filter->IsUndefined() || filter->IsJSObject());
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
RUNTIME_ASSERT(max_references >= 0);
-
- // Get the constructor function for context extension and arguments array.
- Handle<JSFunction> arguments_function(
- JSFunction::cast(isolate->sloppy_arguments_map()->GetConstructor()));
-
- // Get the number of referencing objects.
- int count;
- // First perform a full GC in order to avoid dead objects and to make the heap
- // iterable.
+ List<Handle<JSObject> > instances;
Heap* heap = isolate->heap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy");
- {
- HeapIterator heap_iterator(heap);
- count = DebugReferencedBy(&heap_iterator, *target, *instance_filter,
- max_references, NULL, 0, *arguments_function);
- }
-
- // Allocate an array to hold the result.
- Handle<FixedArray> instances = isolate->factory()->NewFixedArray(count);
-
- // Fill the referencing objects.
{
- HeapIterator heap_iterator(heap);
- count = DebugReferencedBy(&heap_iterator, *target, *instance_filter,
- max_references, *instances, count,
- *arguments_function);
- }
-
- // Return result as JS array.
- Handle<JSFunction> constructor = isolate->array_function();
-
- Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
- JSArray::SetContent(Handle<JSArray>::cast(result), instances);
- return *result;
-}
-
-
-// Helper function used by Runtime_DebugConstructedBy below.
-static int DebugConstructedBy(HeapIterator* iterator, JSFunction* constructor,
- int max_references, FixedArray* instances,
- int instances_size) {
- DisallowHeapAllocation no_allocation;
-
- // Iterate the heap.
- int count = 0;
- HeapObject* heap_obj = NULL;
- while (((heap_obj = iterator->next()) != NULL) &&
- (max_references == 0 || count < max_references)) {
- // Only look at all JSObjects.
- if (heap_obj->IsJSObject()) {
+ HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
+ // Get the constructor function for context extension and arguments array.
+ Object* arguments_fun = isolate->sloppy_arguments_map()->GetConstructor();
+ HeapObject* heap_obj;
+ while ((heap_obj = iterator.next())) {
+ if (!heap_obj->IsJSObject()) continue;
JSObject* obj = JSObject::cast(heap_obj);
- if (obj->map()->GetConstructor() == constructor) {
- // Valid reference found add to instance array if supplied an update
- // count.
- if (instances != NULL && count < instances_size) {
- instances->set(count, obj);
- }
- count++;
+ if (obj->IsJSContextExtensionObject()) continue;
+ if (obj->map()->GetConstructor() == arguments_fun) continue;
+ if (!obj->ReferencesObject(*target)) continue;
+ // Check filter if supplied. This is normally used to avoid
+ // references from mirror objects.
+ if (!filter->IsUndefined() &&
+ obj->HasInPrototypeChain(isolate, *filter)) {
+ continue;
+ }
+ if (obj->IsJSGlobalObject()) {
+ obj = JSGlobalObject::cast(obj)->global_proxy();
}
+ instances.Add(Handle<JSObject>(obj));
+ if (instances.length() == max_references) break;
+ }
+ // Iterate the rest of the heap to satisfy HeapIterator constraints.
+ while (iterator.next()) {
}
}
- // Return the number of referencing objects found.
- return count;
+ Handle<FixedArray> result;
+ if (instances.length() == 1 && instances.last().is_identical_to(target)) {
+ // Check for circular reference only. This can happen when the object is
+ // only referenced from mirrors and has a circular reference in which case
+ // the object is not really alive and would have been garbage collected if
+ // not referenced from the mirror.
+ result = isolate->factory()->empty_fixed_array();
+ } else {
+ result = isolate->factory()->NewFixedArray(instances.length());
+ for (int i = 0; i < instances.length(); ++i) result->set(i, *instances[i]);
+ }
+ return *isolate->factory()->NewJSArrayWithElements(result);
}
@@ -2914,40 +1423,31 @@ static int DebugConstructedBy(HeapIterator* iterator, JSFunction* constructor,
RUNTIME_FUNCTION(Runtime_DebugConstructedBy) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
-
-
- // Check parameters.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
RUNTIME_ASSERT(max_references >= 0);
- // Get the number of referencing objects.
- int count;
- // First perform a full GC in order to avoid dead objects and to make the heap
- // iterable.
+ List<Handle<JSObject> > instances;
Heap* heap = isolate->heap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy");
- {
- HeapIterator heap_iterator(heap);
- count = DebugConstructedBy(&heap_iterator, *constructor, max_references,
- NULL, 0);
- }
-
- // Allocate an array to hold the result.
- Handle<FixedArray> instances = isolate->factory()->NewFixedArray(count);
-
- // Fill the referencing objects.
{
- HeapIterator heap_iterator2(heap);
- count = DebugConstructedBy(&heap_iterator2, *constructor, max_references,
- *instances, count);
+ HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
+ HeapObject* heap_obj;
+ while ((heap_obj = iterator.next())) {
+ if (!heap_obj->IsJSObject()) continue;
+ JSObject* obj = JSObject::cast(heap_obj);
+ if (obj->map()->GetConstructor() != *constructor) continue;
+ instances.Add(Handle<JSObject>(obj));
+ if (instances.length() == max_references) break;
+ }
+ // Iterate the rest of the heap to satisfy HeapIterator constraints.
+ while (iterator.next()) {
+ }
}
- // Return result as JS array.
- Handle<JSFunction> array_function = isolate->array_function();
- Handle<JSObject> result = isolate->factory()->NewJSObject(array_function);
- JSArray::SetContent(Handle<JSArray>::cast(result), instances);
- return *result;
+ Handle<FixedArray> result =
+ isolate->factory()->NewFixedArray(instances.length());
+ for (int i = 0; i < instances.length(); ++i) result->set(i, *instances[i]);
+ return *isolate->factory()->NewJSArrayWithElements(result);
}
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index c793e88b92..ecd55d172a 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments.h"
#include "src/runtime/runtime-utils.h"
-#include "src/v8.h"
+
+#include "src/arguments.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 749e16b9ed..a368f1b14c 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/accessors.h"
#include "src/arguments.h"
#include "src/compiler.h"
#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
-#include "src/frames.h"
+#include "src/frames-inl.h"
#include "src/messages.h"
-#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
@@ -328,9 +327,8 @@ RUNTIME_FUNCTION(Runtime_SetForceInlineFlag) {
// Find the arguments of the JavaScript function invocation that called
// into C++ code. Collect these in a newly allocated array of handles (possibly
// prefixed by a number of empty handles).
-static SmartArrayPointer<Handle<Object> > GetCallerArguments(Isolate* isolate,
- int prefix_argc,
- int* total_argc) {
+static base::SmartArrayPointer<Handle<Object> > GetCallerArguments(
+ Isolate* isolate, int prefix_argc, int* total_argc) {
// Find frame containing arguments passed to the caller.
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
@@ -355,7 +353,7 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(Isolate* isolate,
argument_count--;
*total_argc = prefix_argc + argument_count;
- SmartArrayPointer<Handle<Object> > param_data(
+ base::SmartArrayPointer<Handle<Object> > param_data(
NewArray<Handle<Object> >(*total_argc));
bool should_deoptimize = false;
for (int i = 0; i < argument_count; i++) {
@@ -376,7 +374,7 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(Isolate* isolate,
int args_count = frame->ComputeParametersCount();
*total_argc = prefix_argc + args_count;
- SmartArrayPointer<Handle<Object> > param_data(
+ base::SmartArrayPointer<Handle<Object> > param_data(
NewArray<Handle<Object> >(*total_argc));
for (int i = 0; i < args_count; i++) {
Handle<Object> val = Handle<Object>(frame->GetParameter(i), isolate);
@@ -401,7 +399,7 @@ RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
bound_function->shared()->set_inferred_name(isolate->heap()->empty_string());
// Get all arguments of calling function (Function.prototype.bind).
int argc = 0;
- SmartArrayPointer<Handle<Object> > arguments =
+ base::SmartArrayPointer<Handle<Object> > arguments =
GetCallerArguments(isolate, 0, &argc);
// Don't count the this-arg.
if (argc > 0) {
@@ -443,9 +441,18 @@ RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
// Update length. Have to remove the prototype first so that map migration
// is happy about the number of fields.
RUNTIME_ASSERT(bound_function->RemovePrototype());
+
+ // The new function should have the same [[Prototype]] as the bindee.
Handle<Map> bound_function_map(
isolate->native_context()->bound_function_map());
+ PrototypeIterator iter(isolate, bindee);
+ Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
+ if (bound_function_map->prototype() != *proto) {
+ bound_function_map = Map::TransitionToPrototype(bound_function_map, proto,
+ REGULAR_PROTOTYPE);
+ }
JSObject::MigrateToMap(bound_function, bound_function_map);
+
Handle<String> length_string = isolate->factory()->length_string();
// These attributes must be kept in sync with how the bootstrapper
// configures the bound_function_map retrieved above.
@@ -494,7 +501,7 @@ RUNTIME_FUNCTION(Runtime_NewObjectFromBound) {
!Handle<JSFunction>::cast(bound_function)->shared()->bound());
int total_argc = 0;
- SmartArrayPointer<Handle<Object> > param_data =
+ base::SmartArrayPointer<Handle<Object> > param_data =
GetCallerArguments(isolate, bound_argc, &total_argc);
for (int i = 0; i < bound_argc; i++) {
param_data[i] = Handle<Object>(
@@ -526,12 +533,12 @@ RUNTIME_FUNCTION(Runtime_Call) {
// If there are too many arguments, allocate argv via malloc.
const int argv_small_size = 10;
Handle<Object> argv_small_buffer[argv_small_size];
- SmartArrayPointer<Handle<Object> > argv_large_buffer;
+ base::SmartArrayPointer<Handle<Object> > argv_large_buffer;
Handle<Object>* argv = argv_small_buffer;
if (argc > argv_small_size) {
argv = new Handle<Object>[argc];
if (argv == NULL) return isolate->StackOverflow();
- argv_large_buffer = SmartArrayPointer<Handle<Object> >(argv);
+ argv_large_buffer = base::SmartArrayPointer<Handle<Object> >(argv);
}
for (int i = 0; i < argc; ++i) {
@@ -565,12 +572,12 @@ RUNTIME_FUNCTION(Runtime_Apply) {
// If there are too many arguments, allocate argv via malloc.
const int argv_small_size = 10;
Handle<Object> argv_small_buffer[argv_small_size];
- SmartArrayPointer<Handle<Object> > argv_large_buffer;
+ base::SmartArrayPointer<Handle<Object> > argv_large_buffer;
Handle<Object>* argv = argv_small_buffer;
if (argc > argv_small_size) {
argv = new Handle<Object>[argc];
if (argv == NULL) return isolate->StackOverflow();
- argv_large_buffer = SmartArrayPointer<Handle<Object> >(argv);
+ argv_large_buffer = base::SmartArrayPointer<Handle<Object> >(argv);
}
for (int i = 0; i < argc; ++i) {
diff --git a/deps/v8/src/runtime/runtime-futex.cc b/deps/v8/src/runtime/runtime-futex.cc
new file mode 100644
index 0000000000..a96758d9f3
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-futex.cc
@@ -0,0 +1,93 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/runtime/runtime-utils.h"
+
+#include "src/arguments.h"
+#include "src/base/platform/time.h"
+#include "src/conversions-inl.h"
+#include "src/futex-emulation.h"
+#include "src/globals.h"
+
+// Implement Futex API for SharedArrayBuffers as defined in the
+// SharedArrayBuffer draft spec, found here:
+// https://github.com/lars-t-hansen/ecmascript_sharedmem
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_AtomicsFutexWait) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+ CONVERT_SIZE_ARG_CHECKED(index, 1);
+ CONVERT_INT32_ARG_CHECKED(value, 2);
+ CONVERT_DOUBLE_ARG_CHECKED(timeout, 3);
+ RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
+ RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+ RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
+ RUNTIME_ASSERT(timeout == V8_INFINITY || !std::isnan(timeout));
+
+ Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
+ size_t addr = index << 2;
+
+ return FutexEmulation::Wait(isolate, array_buffer, addr, value, timeout);
+}
+
+
+RUNTIME_FUNCTION(Runtime_AtomicsFutexWake) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+ CONVERT_SIZE_ARG_CHECKED(index, 1);
+ CONVERT_INT32_ARG_CHECKED(count, 2);
+ RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
+ RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+ RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
+
+ Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
+ size_t addr = index << 2;
+
+ return FutexEmulation::Wake(isolate, array_buffer, addr, count);
+}
+
+
+RUNTIME_FUNCTION(Runtime_AtomicsFutexWakeOrRequeue) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 5);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+ CONVERT_SIZE_ARG_CHECKED(index1, 1);
+ CONVERT_INT32_ARG_CHECKED(count, 2);
+ CONVERT_INT32_ARG_CHECKED(value, 3);
+ CONVERT_SIZE_ARG_CHECKED(index2, 4);
+ RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
+ RUNTIME_ASSERT(index1 < NumberToSize(isolate, sta->length()));
+ RUNTIME_ASSERT(index2 < NumberToSize(isolate, sta->length()));
+ RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
+
+ Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
+ size_t addr1 = index1 << 2;
+ size_t addr2 = index2 << 2;
+
+ return FutexEmulation::WakeOrRequeue(isolate, array_buffer, addr1, count,
+ value, addr2);
+}
+
+
+RUNTIME_FUNCTION(Runtime_AtomicsFutexNumWaitersForTesting) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
+ CONVERT_SIZE_ARG_CHECKED(index, 1);
+ RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
+ RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+ RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
+
+ Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
+ size_t addr = index << 2;
+
+ return FutexEmulation::NumWaitersForTesting(isolate, array_buffer, addr);
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index ed86c4dd74..208f7f6680 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/factory.h"
#include "src/frames-inl.h"
-#include "src/runtime/runtime-utils.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-i18n.cc
index 3da71a98ef..73d511074a 100644
--- a/deps/v8/src/runtime/runtime-i18n.cc
+++ b/deps/v8/src/runtime/runtime-i18n.cc
@@ -4,13 +4,14 @@
#ifdef V8_I18N_SUPPORT
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/api.h"
#include "src/api-natives.h"
#include "src/arguments.h"
+#include "src/factory.h"
#include "src/i18n.h"
#include "src/messages.h"
-#include "src/runtime/runtime-utils.h"
#include "unicode/brkiter.h"
#include "unicode/calendar.h"
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 290d7af2fa..01e3e913af 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -2,15 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
#include "src/bootstrapper.h"
-#include "src/debug.h"
+#include "src/conversions.h"
+#include "src/debug/debug.h"
+#include "src/frames-inl.h"
#include "src/messages.h"
#include "src/parser.h"
#include "src/prettyprinter.h"
-#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
@@ -23,6 +24,36 @@ RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
}
+RUNTIME_FUNCTION(Runtime_ImportToRuntime) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
+ RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+ Bootstrapper::ImportNatives(isolate, container);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_ImportExperimentalToRuntime) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
+ RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+ Bootstrapper::ImportExperimentalNatives(isolate, container);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_InstallJSBuiltins) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
+ RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+ Bootstrapper::InstallJSBuiltins(isolate, container);
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(Runtime_Throw) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -222,116 +253,21 @@ RUNTIME_FUNCTION(Runtime_RenderCallSite) {
if (location.start_pos() == -1) return isolate->heap()->empty_string();
Zone zone;
- SmartPointer<ParseInfo> info(location.function()->shared()->is_function()
- ? new ParseInfo(&zone, location.function())
- : new ParseInfo(&zone, location.script()));
+ base::SmartPointer<ParseInfo> info(
+ location.function()->shared()->is_function()
+ ? new ParseInfo(&zone, location.function())
+ : new ParseInfo(&zone, location.script()));
if (!Parser::ParseStatic(info.get())) {
isolate->clear_pending_exception();
return isolate->heap()->empty_string();
}
CallPrinter printer(isolate, &zone);
- const char* string = printer.Print(info->function(), location.start_pos());
+ const char* string = printer.Print(info->literal(), location.start_pos());
return *isolate->factory()->NewStringFromAsciiChecked(string);
}
-RUNTIME_FUNCTION(Runtime_GetFromCacheRT) {
- SealHandleScope shs(isolate);
- // This is only called from codegen, so checks might be more lax.
- CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0);
- CONVERT_ARG_CHECKED(Object, key, 1);
-
- {
- DisallowHeapAllocation no_alloc;
-
- int finger_index = cache->finger_index();
- Object* o = cache->get(finger_index);
- if (o == key) {
- // The fastest case: hit the same place again.
- return cache->get(finger_index + 1);
- }
-
- for (int i = finger_index - 2; i >= JSFunctionResultCache::kEntriesIndex;
- i -= 2) {
- o = cache->get(i);
- if (o == key) {
- cache->set_finger_index(i);
- return cache->get(i + 1);
- }
- }
-
- int size = cache->size();
- DCHECK(size <= cache->length());
-
- for (int i = size - 2; i > finger_index; i -= 2) {
- o = cache->get(i);
- if (o == key) {
- cache->set_finger_index(i);
- return cache->get(i + 1);
- }
- }
- }
-
- // There is no value in the cache. Invoke the function and cache result.
- HandleScope scope(isolate);
-
- Handle<JSFunctionResultCache> cache_handle(cache);
- Handle<Object> key_handle(key, isolate);
- Handle<Object> value;
- {
- Handle<JSFunction> factory(JSFunction::cast(
- cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
- // TODO(antonm): consider passing a receiver when constructing a cache.
- Handle<JSObject> receiver(isolate->global_proxy());
- // This handle is nor shared, nor used later, so it's safe.
- Handle<Object> argv[] = {key_handle};
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value,
- Execution::Call(isolate, factory, receiver, arraysize(argv), argv));
- }
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- cache_handle->JSFunctionResultCacheVerify();
- }
-#endif
-
- // Function invocation may have cleared the cache. Reread all the data.
- int finger_index = cache_handle->finger_index();
- int size = cache_handle->size();
-
- // If we have spare room, put new data into it, otherwise evict post finger
- // entry which is likely to be the least recently used.
- int index = -1;
- if (size < cache_handle->length()) {
- cache_handle->set_size(size + JSFunctionResultCache::kEntrySize);
- index = size;
- } else {
- index = finger_index + JSFunctionResultCache::kEntrySize;
- if (index == cache_handle->length()) {
- index = JSFunctionResultCache::kEntriesIndex;
- }
- }
-
- DCHECK(index % 2 == 0);
- DCHECK(index >= JSFunctionResultCache::kEntriesIndex);
- DCHECK(index < cache_handle->length());
-
- cache_handle->set(index, *key_handle);
- cache_handle->set(index + 1, *value);
- cache_handle->set_finger_index(index);
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- cache_handle->JSFunctionResultCacheVerify();
- }
-#endif
-
- return *value;
-}
-
-
RUNTIME_FUNCTION(Runtime_MessageGetStartPosition) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -348,6 +284,18 @@ RUNTIME_FUNCTION(Runtime_MessageGetScript) {
}
+RUNTIME_FUNCTION(Runtime_ErrorToStringRT) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, error, 0);
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ isolate->error_tostring_helper()->Stringify(isolate, error));
+ return *result;
+}
+
+
RUNTIME_FUNCTION(Runtime_FormatMessageString) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
@@ -412,15 +360,6 @@ RUNTIME_FUNCTION(Runtime_IS_VAR) {
}
-RUNTIME_FUNCTION(Runtime_GetFromCache) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_SMI_ARG_CHECKED(id, 0);
- args[0] = isolate->native_context()->jsfunction_result_caches()->get(id);
- return __RT_impl_Runtime_GetFromCacheRT(args, isolate);
-}
-
-
RUNTIME_FUNCTION(Runtime_IncrementStatsCounter) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -467,5 +406,12 @@ RUNTIME_FUNCTION(Runtime_GetCallerJSFunction) {
RUNTIME_ASSERT(it.frame()->type() == StackFrame::JAVA_SCRIPT);
return JavaScriptFrame::cast(it.frame())->function();
}
+
+
+RUNTIME_FUNCTION(Runtime_GetCodeStubExportsObject) {
+ HandleScope shs(isolate);
+ return isolate->heap()->code_stub_exports_object();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-json.cc b/deps/v8/src/runtime/runtime-json.cc
index 68f76c56a8..64a42bfede 100644
--- a/deps/v8/src/runtime/runtime-json.cc
+++ b/deps/v8/src/runtime/runtime-json.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/char-predicates-inl.h"
#include "src/json-parser.h"
#include "src/json-stringifier.h"
-#include "src/runtime/runtime-utils.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 80af962e3c..f434747e28 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/allocation-site-scopes.h"
#include "src/arguments.h"
#include "src/ast.h"
#include "src/parser.h"
#include "src/runtime/runtime.h"
-#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-liveedit.cc b/deps/v8/src/runtime/runtime-liveedit.cc
index 555fb6a74b..3b8dad9b5e 100644
--- a/deps/v8/src/runtime/runtime-liveedit.cc
+++ b/deps/v8/src/runtime/runtime-liveedit.cc
@@ -2,43 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
-#include "src/debug.h"
-#include "src/liveedit.h"
+#include "src/debug/debug.h"
+#include "src/debug/debug-frames.h"
+#include "src/debug/liveedit.h"
+#include "src/frames-inl.h"
#include "src/runtime/runtime.h"
-#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
-
-static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
- Script* script,
- FixedArray* buffer) {
- DisallowHeapAllocation no_allocation;
- int counter = 0;
- int buffer_size = buffer->length();
- for (HeapObject* obj = iterator->next(); obj != NULL;
- obj = iterator->next()) {
- DCHECK(obj != NULL);
- if (!obj->IsSharedFunctionInfo()) {
- continue;
- }
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
- if (shared->script() != script) {
- continue;
- }
- if (counter < buffer_size) {
- buffer->set(counter, shared);
- }
- counter++;
- }
- return counter;
-}
-
-
// For a script finds all SharedFunctionInfo's in the heap that points
// to this script. Returns JSArray of SharedFunctionInfo wrapped
// in OpaqueReferences.
@@ -51,32 +26,29 @@ RUNTIME_FUNCTION(Runtime_LiveEditFindSharedFunctionInfosForScript) {
RUNTIME_ASSERT(script_value->value()->IsScript());
Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
- const int kBufferSize = 32;
-
- Handle<FixedArray> array;
- array = isolate->factory()->NewFixedArray(kBufferSize);
- int number;
+ List<Handle<SharedFunctionInfo> > found;
Heap* heap = isolate->heap();
{
- HeapIterator heap_iterator(heap);
- Script* scr = *script;
- FixedArray* arr = *array;
- number = FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
- }
- if (number > kBufferSize) {
- array = isolate->factory()->NewFixedArray(number);
- HeapIterator heap_iterator(heap);
- Script* scr = *script;
- FixedArray* arr = *array;
- FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
+ HeapIterator iterator(heap);
+ HeapObject* heap_obj;
+ while ((heap_obj = iterator.next())) {
+ if (!heap_obj->IsSharedFunctionInfo()) continue;
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(heap_obj);
+ if (shared->script() != *script) continue;
+ found.Add(Handle<SharedFunctionInfo>(shared));
+ }
}
- Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(array);
- result->set_length(Smi::FromInt(number));
-
- LiveEdit::WrapSharedFunctionInfos(result);
-
- return *result;
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(found.length());
+ for (int i = 0; i < found.length(); ++i) {
+ Handle<SharedFunctionInfo> shared = found[i];
+ SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create(isolate);
+ Handle<String> name(String::cast(shared->name()));
+ info_wrapper.SetProperties(name, shared->start_position(),
+ shared->end_position(), shared);
+ result->set(i, *info_wrapper.GetJSArray());
+ }
+ return *isolate->factory()->NewJSArrayWithElements(result);
}
@@ -280,7 +252,8 @@ RUNTIME_FUNCTION(Runtime_LiveEditRestartFrame) {
}
JavaScriptFrameIterator it(isolate, id);
- int inlined_jsframe_index = Runtime::FindIndexedNonNativeFrame(&it, index);
+ int inlined_jsframe_index =
+ DebugFrameHelper::FindIndexedNonNativeFrame(&it, index);
if (inlined_jsframe_index == -1) return heap->undefined_value();
// We don't really care what the inlined frame index is, since we are
// throwing away the entire frame anyways.
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index 474b463291..504261679e 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -2,15 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
#include "src/assembler.h"
#include "src/codegen.h"
-#include "src/runtime/runtime-utils.h"
#include "src/third_party/fdlibm/fdlibm.h"
-
namespace v8 {
namespace internal {
@@ -135,7 +133,7 @@ RUNTIME_FUNCTION(Runtime_MathFloor) {
// Slow version of Math.pow. We check for fast paths for special cases.
// Used if VFP3 is not available.
-RUNTIME_FUNCTION(Runtime_MathPowSlow) {
+RUNTIME_FUNCTION(Runtime_MathPow) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
isolate->counters()->math_pow()->Increment();
@@ -238,12 +236,6 @@ RUNTIME_FUNCTION(Runtime_MathFround) {
}
-RUNTIME_FUNCTION(Runtime_MathPow) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_MathPowSlow(args, isolate);
-}
-
-
RUNTIME_FUNCTION(Runtime_IsMinusZero) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 3be4cc0f9f..49734ba8dd 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
-#include "src/runtime/runtime-utils.h"
#ifndef _STLP_VENDOR_CSTD
@@ -231,7 +230,7 @@ RUNTIME_FUNCTION(Runtime_StringParseFloat) {
}
-RUNTIME_FUNCTION(Runtime_NumberToStringRT) {
+RUNTIME_FUNCTION(Runtime_NumberToString) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
@@ -271,24 +270,6 @@ RUNTIME_FUNCTION(Runtime_NumberToIntegerMapMinusZero) {
}
-RUNTIME_FUNCTION(Runtime_NumberToJSUint32) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
- return *isolate->factory()->NewNumberFromUint(number);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToJSInt32) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(number, 0);
- return *isolate->factory()->NewNumberFromInt(DoubleToInt32(number));
-}
-
-
// Converts a Number to a Smi, if possible. Returns NaN if the number is not
// a small integer.
RUNTIME_FUNCTION(Runtime_NumberToSmi) {
@@ -558,12 +539,6 @@ RUNTIME_FUNCTION(Runtime_MaxSmi) {
}
-RUNTIME_FUNCTION(Runtime_NumberToString) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_NumberToStringRT(args, isolate);
-}
-
-
RUNTIME_FUNCTION(Runtime_IsSmi) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 9536ec0cc4..26f74efd15 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -2,42 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
#include "src/bootstrapper.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/messages.h"
#include "src/runtime/runtime.h"
-#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
-// Returns a single character string where first character equals
-// string->Get(index).
-static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
- DCHECK_LT(index, static_cast<uint32_t>(string->length()));
- Factory* factory = string->GetIsolate()->factory();
- return factory->LookupSingleCharacterStringFromCode(
- String::Flatten(string)->Get(index));
-}
-
-
-MaybeHandle<Object> Runtime::GetElementOrCharAt(Isolate* isolate,
- Handle<Object> object,
- uint32_t index,
- LanguageMode language_mode) {
- // Handle [] indexing on Strings
- if (object->IsString() &&
- index < static_cast<uint32_t>(String::cast(*object)->length())) {
- Handle<Object> result = GetCharAt(Handle<String>::cast(object), index);
- if (!result->IsUndefined()) return result;
- }
-
- return Object::GetElement(isolate, object, index, language_mode);
-}
-
MaybeHandle<Name> Runtime::ToName(Isolate* isolate, Handle<Object> key) {
if (key->IsName()) {
@@ -65,7 +40,7 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
// Check if the given key is an array index.
uint32_t index = 0;
if (key->ToArrayIndex(&index)) {
- return GetElementOrCharAt(isolate, object, index, language_mode);
+ return Object::GetElement(isolate, object, index, language_mode);
}
// Convert the key to a name - possibly by calling back into JavaScript.
@@ -77,7 +52,7 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
// TODO(verwaest): Make sure GetProperty(LookupIterator*) can handle this, and
// remove the special casing here.
if (name->AsArrayIndex(&index)) {
- return GetElementOrCharAt(isolate, object, index);
+ return Object::GetElement(isolate, object, index);
} else {
return Object::GetProperty(object, name, language_mode);
}
@@ -153,7 +128,9 @@ MaybeHandle<Object> Runtime::KeyedGetObjectProperty(
Handle<String> str = Handle<String>::cast(receiver_obj);
int index = Handle<Smi>::cast(key_obj)->value();
if (index >= 0 && index < str->length()) {
- return GetCharAt(str, index);
+ Factory* factory = isolate->factory();
+ return factory->LookupSingleCharacterStringFromCode(
+ String::Flatten(str)->Get(index));
}
}
@@ -194,18 +171,13 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
// Check if the given key is an array index.
uint32_t index = 0;
if (key->ToArrayIndex(&index)) {
- // TODO(verwaest): Support other objects as well.
- if (!object->IsJSReceiver()) return value;
- return JSReceiver::SetElement(Handle<JSReceiver>::cast(object), index,
- value, language_mode);
+ return Object::SetElement(isolate, object, index, value, language_mode);
}
Handle<Name> name;
ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, key), Object);
LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name);
- // TODO(verwaest): Support other objects as well.
- if (it.IsElement() && !object->IsJSReceiver()) return value;
return Object::SetProperty(&it, value, language_mode,
Object::MAY_BE_STORE_FROM_KEYED);
}
@@ -220,10 +192,7 @@ MaybeHandle<Object> Runtime::GetPrototype(Isolate* isolate,
if (PrototypeIterator::GetCurrent(iter)->IsAccessCheckNeeded() &&
!isolate->MayAccess(
Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)))) {
- isolate->ReportFailedAccessCheck(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)));
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->undefined_value();
+ return isolate->factory()->null_value();
}
iter.AdvanceIgnoringProxies();
if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
@@ -294,17 +263,12 @@ RUNTIME_FUNCTION(Runtime_SetPrototype) {
RUNTIME_FUNCTION(Runtime_IsInPrototypeChain) {
- HandleScope shs(isolate);
+ SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
// See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
- CONVERT_ARG_HANDLE_CHECKED(Object, O, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, V, 1);
- PrototypeIterator iter(isolate, V, PrototypeIterator::START_AT_RECEIVER);
- while (true) {
- iter.AdvanceIgnoringProxies();
- if (iter.IsAtEnd()) return isolate->heap()->false_value();
- if (iter.IsAtEnd(O)) return isolate->heap()->true_value();
- }
+ CONVERT_ARG_CHECKED(Object, O, 0);
+ CONVERT_ARG_CHECKED(Object, V, 1);
+ return isolate->heap()->ToBoolean(V->HasInPrototypeChain(isolate, O));
}
@@ -448,6 +412,102 @@ RUNTIME_FUNCTION(Runtime_ObjectSeal) {
}
+RUNTIME_FUNCTION(Runtime_LoadGlobalViaContext) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_SMI_ARG_CHECKED(slot, 0);
+
+ // Go up context chain to the script context.
+ Handle<Context> script_context(isolate->context()->script_context(), isolate);
+ DCHECK(script_context->IsScriptContext());
+ DCHECK(script_context->get(slot)->IsPropertyCell());
+
+ // Lookup the named property on the global object.
+ Handle<ScopeInfo> scope_info(ScopeInfo::cast(script_context->extension()),
+ isolate);
+ Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
+ Handle<GlobalObject> global_object(script_context->global_object(), isolate);
+ LookupIterator it(global_object, name, LookupIterator::HIDDEN);
+
+ // Switch to fast mode only if there is a data property and it's not on
+ // a hidden prototype.
+ if (it.state() == LookupIterator::DATA &&
+ it.GetHolder<Object>().is_identical_to(global_object)) {
+ // Now update the cell in the script context.
+ Handle<PropertyCell> cell = it.GetPropertyCell();
+ script_context->set(slot, *cell);
+ } else {
+ // This is not a fast case, so keep this access in a slow mode.
+ // Store empty_property_cell here to release the outdated property cell.
+ script_context->set(slot, isolate->heap()->empty_property_cell());
+ }
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Object::GetProperty(&it));
+ return *result;
+}
+
+
+namespace {
+
+Object* StoreGlobalViaContext(Isolate* isolate, int slot, Handle<Object> value,
+ LanguageMode language_mode) {
+ // Go up context chain to the script context.
+ Handle<Context> script_context(isolate->context()->script_context(), isolate);
+ DCHECK(script_context->IsScriptContext());
+ DCHECK(script_context->get(slot)->IsPropertyCell());
+
+ // Lookup the named property on the global object.
+ Handle<ScopeInfo> scope_info(ScopeInfo::cast(script_context->extension()),
+ isolate);
+ Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
+ Handle<GlobalObject> global_object(script_context->global_object(), isolate);
+ LookupIterator it(global_object, name, LookupIterator::HIDDEN);
+
+ // Switch to fast mode only if there is a data property and it's not on
+ // a hidden prototype.
+ if (it.state() == LookupIterator::DATA &&
+ it.GetHolder<Object>().is_identical_to(global_object)) {
+ // Now update cell in the script context.
+ Handle<PropertyCell> cell = it.GetPropertyCell();
+ script_context->set(slot, *cell);
+ } else {
+ // This is not a fast case, so keep this access in a slow mode.
+ // Store empty_property_cell here to release the outdated property cell.
+ script_context->set(slot, isolate->heap()->empty_property_cell());
+ }
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Object::SetProperty(&it, value, language_mode,
+ Object::CERTAINLY_NOT_STORE_FROM_KEYED));
+ return *result;
+}
+
+} // namespace
+
+
+RUNTIME_FUNCTION(Runtime_StoreGlobalViaContext_Sloppy) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_SMI_ARG_CHECKED(slot, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+
+ return StoreGlobalViaContext(isolate, slot, value, SLOPPY);
+}
+
+
+RUNTIME_FUNCTION(Runtime_StoreGlobalViaContext_Strict) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_SMI_ARG_CHECKED(slot, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+
+ return StoreGlobalViaContext(isolate, slot, value, STRICT);
+}
+
+
RUNTIME_FUNCTION(Runtime_GetProperty) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -605,12 +665,16 @@ RUNTIME_FUNCTION(Runtime_SetProperty) {
}
-RUNTIME_FUNCTION(Runtime_DeleteProperty) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 2);
+namespace {
+
+// ES6 section 12.5.4.
+Object* DeleteProperty(Isolate* isolate, Handle<Object> object,
+ Handle<Object> key, LanguageMode language_mode) {
+ Handle<JSReceiver> receiver;
+ if (!JSReceiver::ToObject(isolate, object).ToHandle(&receiver)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
+ }
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -618,6 +682,26 @@ RUNTIME_FUNCTION(Runtime_DeleteProperty) {
return *result;
}
+} // namespace
+
+
+RUNTIME_FUNCTION(Runtime_DeleteProperty_Sloppy) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ return DeleteProperty(isolate, object, key, SLOPPY);
+}
+
+
+RUNTIME_FUNCTION(Runtime_DeleteProperty_Strict) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ return DeleteProperty(isolate, object, key, STRICT);
+}
+
static Object* HasOwnPropertyImplementation(Isolate* isolate,
Handle<JSObject> object,
@@ -728,26 +812,10 @@ RUNTIME_FUNCTION(Runtime_IsPropertyEnumerable) {
}
-RUNTIME_FUNCTION(Runtime_GetPropertyNames) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
- Handle<JSArray> result;
-
- isolate->counters()->for_in()->Increment();
- Handle<FixedArray> elements;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, elements,
- JSReceiver::GetKeys(object, JSReceiver::INCLUDE_PROTOS));
- return *isolate->factory()->NewJSArrayWithElements(elements);
-}
-
-
-// Returns either a FixedArray as Runtime_GetPropertyNames,
-// or, if the given object has an enum cache that contains
-// all enumerable properties of the object and its prototypes
-// have none, the map of the object. This is used to speed up
-// the check for deletions during a for-in.
+// Returns either a FixedArray or, if the given object has an enum cache that
+// contains all enumerable properties of the object and its prototypes have
+// none, the map of the object. This is used to speed up the check for
+// deletions during a for-in.
RUNTIME_FUNCTION(Runtime_GetPropertyNamesFast) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -770,19 +838,6 @@ RUNTIME_FUNCTION(Runtime_GetPropertyNamesFast) {
}
-// Find the length of the prototype chain that is to be handled as one. If a
-// prototype object is hidden it is to be viewed as part of the the object it
-// is prototype for.
-static int OwnPrototypeChainLength(JSObject* obj) {
- int count = 1;
- for (PrototypeIterator iter(obj->GetIsolate(), obj);
- !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
- count++;
- }
- return count;
-}
-
-
// Return the names of the own named properties.
// args[0]: object
// args[1]: PropertyAttributes as int
@@ -792,47 +847,18 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
if (!args[0]->IsJSObject()) {
return isolate->heap()->undefined_value();
}
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_SMI_ARG_CHECKED(filter_value, 1);
PropertyAttributes filter = static_cast<PropertyAttributes>(filter_value);
- // Skip the global proxy as it has no properties and always delegates to the
- // real global object.
- if (obj->IsJSGlobalProxy()) {
- // Only collect names if access is permitted.
- if (obj->IsAccessCheckNeeded() && !isolate->MayAccess(obj)) {
- isolate->ReportFailedAccessCheck(obj);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return *isolate->factory()->NewJSArray(0);
- }
- PrototypeIterator iter(isolate, obj);
- obj = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
- }
-
- // Find the number of objects making up this.
- int length = OwnPrototypeChainLength(*obj);
-
// Find the number of own properties for each of the objects.
- ScopedVector<int> own_property_count(length);
int total_property_count = 0;
- {
- PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER);
- for (int i = 0; i < length; i++) {
- DCHECK(!iter.IsAtEnd());
- Handle<JSObject> jsproto =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
- // Only collect names if access is permitted.
- if (jsproto->IsAccessCheckNeeded() && !isolate->MayAccess(jsproto)) {
- isolate->ReportFailedAccessCheck(jsproto);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return *isolate->factory()->NewJSArray(0);
- }
- int n;
- n = jsproto->NumberOfOwnProperties(filter);
- own_property_count[i] = n;
- total_property_count += n;
- iter.Advance();
- }
+ for (PrototypeIterator iter(isolate, object,
+ PrototypeIterator::START_AT_RECEIVER);
+ !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
+ Handle<JSObject> jsproto =
+ Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ total_property_count += jsproto->NumberOfOwnProperties(filter);
}
// Allocate an array with storage for all the property names.
@@ -842,53 +868,69 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
// Get the property names.
int next_copy_index = 0;
int hidden_strings = 0;
- {
- PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER);
- for (int i = 0; i < length; i++) {
- DCHECK(!iter.IsAtEnd());
- Handle<JSObject> jsproto =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
- jsproto->GetOwnPropertyNames(*names, next_copy_index, filter);
- // Names from hidden prototypes may already have been added
- // for inherited function template instances. Count the duplicates
- // and stub them out; the final copy pass at the end ignores holes.
- for (int j = next_copy_index; j < next_copy_index + own_property_count[i];
- j++) {
- Object* name_from_hidden_proto = names->get(j);
- if (isolate->IsInternallyUsedPropertyName(name_from_hidden_proto)) {
- hidden_strings++;
- } else {
- for (int k = 0; k < next_copy_index; k++) {
- Object* name = names->get(k);
- if (name_from_hidden_proto == name) {
- names->set(j, isolate->heap()->hidden_string());
- hidden_strings++;
- break;
- }
+ Handle<Object> hidden_string = isolate->factory()->hidden_string();
+ for (PrototypeIterator iter(isolate, object,
+ PrototypeIterator::START_AT_RECEIVER);
+ !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
+ Handle<JSObject> jsproto =
+ Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ int own = jsproto->GetOwnPropertyNames(*names, next_copy_index, filter);
+ // Names from hidden prototypes may already have been added
+ // for inherited function template instances. Count the duplicates
+ // and stub them out; the final copy pass at the end ignores holes.
+ for (int j = next_copy_index; j < next_copy_index + own; j++) {
+ Object* name_from_hidden_proto = names->get(j);
+ if (isolate->IsInternallyUsedPropertyName(name_from_hidden_proto)) {
+ hidden_strings++;
+ } else {
+ for (int k = 0; k < next_copy_index; k++) {
+ Object* name = names->get(k);
+ if (name_from_hidden_proto == name) {
+ names->set(j, *hidden_string);
+ hidden_strings++;
+ break;
}
}
}
- next_copy_index += own_property_count[i];
+ }
+ next_copy_index += own;
+ }
+
+ CHECK_EQ(total_property_count, next_copy_index);
- iter.Advance();
+ if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
+ for (int i = 0; i < total_property_count; i++) {
+ Handle<Name> name(Name::cast(names->get(i)));
+ if (name.is_identical_to(hidden_string)) continue;
+ LookupIterator it(object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+ if (!JSObject::AllCanRead(&it)) {
+ names->set(i, *hidden_string);
+ hidden_strings++;
+ }
}
}
// Filter out name of hidden properties object and
// hidden prototype duplicates.
if (hidden_strings > 0) {
- Handle<FixedArray> old_names = names;
- names = isolate->factory()->NewFixedArray(names->length() - hidden_strings);
- int dest_pos = 0;
- for (int i = 0; i < total_property_count; i++) {
- Object* name = old_names->get(i);
- if (isolate->IsInternallyUsedPropertyName(name)) {
- hidden_strings--;
- continue;
+ if (hidden_strings == total_property_count) {
+ names = isolate->factory()->empty_fixed_array();
+ } else {
+ int i;
+ for (i = 0; i < total_property_count; i++) {
+ Object* name = names->get(i);
+ if (name == *hidden_string) break;
}
- names->set(dest_pos++, name);
+ int dest_pos = i;
+ for (; i < total_property_count; i++) {
+ Object* name = names->get(i);
+ if (name == *hidden_string) continue;
+ names->set(dest_pos++, name);
+ }
+
+ isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
+ *names, hidden_strings);
}
- DCHECK_EQ(0, hidden_strings);
}
return *isolate->factory()->NewJSArrayWithElements(names);
@@ -970,20 +1012,6 @@ RUNTIME_FUNCTION(Runtime_OwnKeys) {
CONVERT_ARG_CHECKED(JSObject, raw_object, 0);
Handle<JSObject> object(raw_object);
- if (object->IsJSGlobalProxy()) {
- // Do access checks before going to the global object.
- if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
- isolate->ReportFailedAccessCheck(object);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return *isolate->factory()->NewJSArray(0);
- }
-
- PrototypeIterator iter(isolate, object);
- // If proxy is detached we simply return an empty array.
- if (iter.IsAtEnd()) return *isolate->factory()->NewJSArray(0);
- object = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
- }
-
Handle<FixedArray> contents;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, contents, JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY));
@@ -1022,15 +1050,6 @@ RUNTIME_FUNCTION(Runtime_ToFastProperties) {
}
-RUNTIME_FUNCTION(Runtime_ToBool) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, object, 0);
-
- return isolate->heap()->ToBoolean(object->BooleanValue());
-}
-
-
RUNTIME_FUNCTION(Runtime_NewStringWrapper) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -1192,7 +1211,7 @@ RUNTIME_FUNCTION(Runtime_LoadMutableDouble) {
FieldIndex::ForLoadByFieldIndex(object->map(), index->value());
if (field_index.is_inobject()) {
RUNTIME_ASSERT(field_index.property_index() <
- object->map()->inobject_properties());
+ object->map()->GetInObjectProperties());
} else {
RUNTIME_ASSERT(field_index.outobject_array_index() <
object->properties()->length());
@@ -1371,14 +1390,6 @@ RUNTIME_FUNCTION(Runtime_IsObject) {
}
-RUNTIME_FUNCTION(Runtime_IsUndetectableObject) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsUndetectableObject());
-}
-
-
RUNTIME_FUNCTION(Runtime_IsSpecObject) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -1437,6 +1448,29 @@ RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
}
+RUNTIME_FUNCTION(Runtime_ToObject) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<JSReceiver> receiver;
+ if (JSReceiver::ToObject(isolate, object).ToHandle(&receiver)) {
+ return *receiver;
+ }
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
+}
+
+
+RUNTIME_FUNCTION(Runtime_StrictEquals) {
+ SealHandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(Object, x, 0);
+ CONVERT_ARG_CHECKED(Object, y, 1);
+ // TODO(bmeurer): Change this at some point to return true/false instead.
+ return Smi::FromInt(x->StrictEquals(y) ? EQUAL : NOT_EQUAL);
+}
+
+
RUNTIME_FUNCTION(Runtime_IsAccessCheckNeeded) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-observe.cc b/deps/v8/src/runtime/runtime-observe.cc
index 8fc201da0e..e4ce23f87e 100644
--- a/deps/v8/src/runtime/runtime-observe.cc
+++ b/deps/v8/src/runtime/runtime-observe.cc
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
-#include "src/debug.h"
-#include "src/runtime/runtime-utils.h"
+#include "src/debug/debug.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 0f175c0168..03af691cf3 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
-#include "src/runtime/runtime-utils.h"
+#include "src/factory.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index efa91b8485..de671f5783 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
-#include "src/jsregexp-inl.h"
-#include "src/jsregexp.h"
+#include "src/conversions-inl.h"
#include "src/messages.h"
+#include "src/regexp/jsregexp-inl.h"
+#include "src/regexp/jsregexp.h"
#include "src/runtime/runtime-utils.h"
#include "src/string-builder.h"
#include "src/string-search.h"
@@ -643,7 +644,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
if (!heap->lo_space()->Contains(*answer)) {
heap->CreateFillerObjectAt(end_of_string, delta);
}
- heap->AdjustLiveBytes(answer->address(), -delta, Heap::CONCURRENT_TO_SWEEPER);
+ heap->AdjustLiveBytes(*answer, -delta, Heap::CONCURRENT_TO_SWEEPER);
return *answer;
}
@@ -784,7 +785,7 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
}
-RUNTIME_FUNCTION(Runtime_RegExpConstructResultRT) {
+RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 3);
CONVERT_SMI_ARG_CHECKED(size, 0);
@@ -794,7 +795,7 @@ RUNTIME_FUNCTION(Runtime_RegExpConstructResultRT) {
Handle<FixedArray> elements = isolate->factory()->NewFixedArray(size);
Handle<Map> regexp_map(isolate->native_context()->regexp_result_map());
Handle<JSObject> object =
- isolate->factory()->NewJSObjectFromMap(regexp_map, NOT_TENURED, false);
+ isolate->factory()->NewJSObjectFromMap(regexp_map, NOT_TENURED);
Handle<JSArray> array = Handle<JSArray>::cast(object);
array->set_elements(*elements);
array->set_length(Smi::FromInt(size));
@@ -805,12 +806,6 @@ RUNTIME_FUNCTION(Runtime_RegExpConstructResultRT) {
}
-RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_RegExpConstructResultRT(args, isolate);
-}
-
-
static JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags,
bool* success) {
uint32_t value = JSRegExp::NONE;
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 700925db62..01c828bf40 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/accessors.h"
#include "src/arguments.h"
#include "src/frames-inl.h"
#include "src/messages.h"
-#include "src/runtime/runtime-utils.h"
#include "src/scopeinfo.h"
#include "src/scopes.h"
@@ -86,12 +85,12 @@ static Object* DeclareGlobals(Isolate* isolate, Handle<GlobalObject> global,
RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(2, args.length());
Handle<GlobalObject> global(isolate->global_object());
+ Handle<Context> context(isolate->context());
- CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 1);
- CONVERT_SMI_ARG_CHECKED(flags, 2);
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 0);
+ CONVERT_SMI_ARG_CHECKED(flags, 1);
// Traverse the name/value pairs and set the properties.
int length = pairs->length();
@@ -202,20 +201,16 @@ RUNTIME_FUNCTION(Runtime_InitializeConstGlobal) {
}
-RUNTIME_FUNCTION(Runtime_DeclareLookupSlot) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+namespace {
+Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
+ Handle<Object> initial_value,
+ PropertyAttributes attr) {
// Declarations are always made in a function, eval or script context. In
// the case of eval code, the context passed is the context of the caller,
// which may be some nested context and not the declaration context.
- CONVERT_ARG_HANDLE_CHECKED(Context, context_arg, 0);
- Handle<Context> context(context_arg->declaration_context());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
- CONVERT_SMI_ARG_CHECKED(attr_arg, 2);
- PropertyAttributes attr = static_cast<PropertyAttributes>(attr_arg);
- RUNTIME_ASSERT(attr == READ_ONLY || attr == NONE);
- CONVERT_ARG_HANDLE_CHECKED(Object, initial_value, 3);
+ Handle<Context> context_arg(isolate->context(), isolate);
+ Handle<Context> context(context_arg->declaration_context(), isolate);
// TODO(verwaest): Unify the encoding indicating "var" with DeclareGlobals.
bool is_var = *initial_value == NULL;
@@ -230,6 +225,10 @@ RUNTIME_FUNCTION(Runtime_DeclareLookupSlot) {
BindingFlags binding_flags;
Handle<Object> holder =
context->Lookup(name, flags, &index, &attributes, &binding_flags);
+ if (holder.is_null()) {
+ // In case of JSProxy, an exception might have been thrown.
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ }
Handle<JSObject> object;
Handle<Object> value =
@@ -290,6 +289,28 @@ RUNTIME_FUNCTION(Runtime_DeclareLookupSlot) {
return isolate->heap()->undefined_value();
}
+} // namespace
+
+
+RUNTIME_FUNCTION(Runtime_DeclareLookupSlot) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, initial_value, 1);
+
+ return DeclareLookupSlot(isolate, name, initial_value, NONE);
+}
+
+
+RUNTIME_FUNCTION(Runtime_DeclareReadOnlyLookupSlot) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, initial_value, 1);
+
+ return DeclareLookupSlot(isolate, name, initial_value, READ_ONLY);
+}
+
RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
HandleScope scope(isolate);
@@ -308,6 +329,10 @@ RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
BindingFlags binding_flags;
Handle<Object> holder =
context->Lookup(name, flags, &index, &attributes, &binding_flags);
+ if (holder.is_null()) {
+ // In case of JSProxy, an exception might have been thrown.
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ }
if (index >= 0) {
DCHECK(holder->IsContext());
@@ -371,7 +396,7 @@ static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
Object** parameters,
int argument_count) {
CHECK(!IsSubclassConstructor(callee->shared()->kind()));
- DCHECK(callee->is_simple_parameter_list());
+ DCHECK(callee->has_simple_parameters());
Handle<JSObject> result =
isolate->factory()->NewArgumentsObject(callee, argument_count);
@@ -492,7 +517,7 @@ RUNTIME_FUNCTION(Runtime_NewArguments) {
Object** parameters = reinterpret_cast<Object**>(frame->GetParameterSlot(-1));
return (is_strict(callee->shared()->language_mode()) ||
- !callee->is_simple_parameter_list())
+ !callee->has_simple_parameters())
? *NewStrictArguments(isolate, callee, parameters, argument_count)
: *NewSloppyArguments(isolate, callee, parameters, argument_count);
}
@@ -822,7 +847,6 @@ RUNTIME_FUNCTION(Runtime_DeclareModules) {
USE(result);
break;
}
- case INTERNAL:
case TEMPORARY:
case DYNAMIC:
case DYNAMIC_GLOBAL:
@@ -855,6 +879,8 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
// If the slot was not found the result is true.
if (holder.is_null()) {
+ // In case of JSProxy, an exception might have been thrown.
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
return isolate->heap()->true_value();
}
@@ -1009,11 +1035,19 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
BindingFlags binding_flags;
Handle<Object> holder =
context->Lookup(name, flags, &index, &attributes, &binding_flags);
- // In case of JSProxy, an exception might have been thrown.
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ if (holder.is_null()) {
+ // In case of JSProxy, an exception might have been thrown.
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ }
// The property was found in a context slot.
if (index >= 0) {
+ if ((binding_flags == MUTABLE_CHECK_INITIALIZED ||
+ binding_flags == IMMUTABLE_CHECK_INITIALIZED_HARMONY) &&
+ Handle<Context>::cast(holder)->is_the_hole(index)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
+ }
if ((attributes & READ_ONLY) == 0) {
Handle<Context>::cast(holder)->set(index, *value);
} else if (is_strict(language_mode)) {
@@ -1047,7 +1081,16 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
}
-RUNTIME_FUNCTION(Runtime_GetArgumentsProperty) {
+RUNTIME_FUNCTION(Runtime_ArgumentsLength) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 0);
+ JavaScriptFrameIterator it(isolate);
+ JavaScriptFrame* frame = it.frame();
+ return Smi::FromInt(frame->GetArgumentsLength());
+}
+
+
+RUNTIME_FUNCTION(Runtime_Arguments) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, raw_key, 0);
@@ -1122,20 +1165,5 @@ RUNTIME_FUNCTION(Runtime_GetArgumentsProperty) {
Object::GetProperty(isolate->initial_object_prototype(), key));
return *result;
}
-
-
-RUNTIME_FUNCTION(Runtime_ArgumentsLength) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- return Smi::FromInt(frame->GetArgumentsLength());
-}
-
-
-RUNTIME_FUNCTION(Runtime_Arguments) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_GetArgumentsProperty(args, isolate);
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-simd.cc b/deps/v8/src/runtime/runtime-simd.cc
new file mode 100644
index 0000000000..ce9512e8da
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-simd.cc
@@ -0,0 +1,821 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/runtime/runtime-utils.h"
+
+#include "src/arguments.h"
+#include "src/base/macros.h"
+#include "src/conversions.h"
+#include "src/factory.h"
+#include "src/objects-inl.h"
+
+// Implement Single Instruction Multiple Data (SIMD) operations as defined in
+// the SIMD.js draft spec:
+// http://littledan.github.io/simd.html
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// Functions to convert Numbers to SIMD component types.
+
+template <typename T>
+static T ConvertNumber(double number);
+
+
+template <>
+float ConvertNumber<float>(double number) {
+ return DoubleToFloat32(number);
+}
+
+
+template <>
+int32_t ConvertNumber<int32_t>(double number) {
+ return DoubleToInt32(number);
+}
+
+
+template <>
+int16_t ConvertNumber<int16_t>(double number) {
+ return static_cast<int16_t>(DoubleToInt32(number));
+}
+
+
+template <>
+int8_t ConvertNumber<int8_t>(double number) {
+ return static_cast<int8_t>(DoubleToInt32(number));
+}
+
+
+// TODO(bbudge): Make this consistent with SIMD instruction results.
+inline float RecipApprox(float a) { return 1.0f / a; }
+
+
+// TODO(bbudge): Make this consistent with SIMD instruction results.
+inline float RecipSqrtApprox(float a) { return 1.0f / std::sqrt(a); }
+
+
+// Saturating addition for int16_t and int8_t.
+template <typename T>
+inline T AddSaturate(T a, T b) {
+ const T max = std::numeric_limits<T>::max();
+ const T min = std::numeric_limits<T>::min();
+ int32_t result = a + b;
+ if (result > max) return max;
+ if (result < min) return min;
+ return result;
+}
+
+
+// Saturating subtraction for int16_t and int8_t.
+template <typename T>
+inline T SubSaturate(T a, T b) {
+ const T max = std::numeric_limits<T>::max();
+ const T min = std::numeric_limits<T>::min();
+ int32_t result = a - b;
+ if (result > max) return max;
+ if (result < min) return min;
+ return result;
+}
+
+
+inline float Min(float a, float b) {
+ if (a < b) return a;
+ if (a > b) return b;
+ if (a == b) return std::signbit(a) ? a : b;
+ return std::numeric_limits<float>::quiet_NaN();
+}
+
+
+inline float Max(float a, float b) {
+ if (a > b) return a;
+ if (a < b) return b;
+ if (a == b) return std::signbit(b) ? a : b;
+ return std::numeric_limits<float>::quiet_NaN();
+}
+
+
+inline float MinNumber(float a, float b) {
+ if (std::isnan(a)) return b;
+ if (std::isnan(b)) return a;
+ return Min(a, b);
+}
+
+
+inline float MaxNumber(float a, float b) {
+ if (std::isnan(a)) return b;
+ if (std::isnan(b)) return a;
+ return Max(a, b);
+}
+
+
+inline bool CanCast(int32_t a) { return true; }
+
+
+inline bool CanCast(float a) {
+ return a > std::numeric_limits<int32_t>::min() &&
+ a < std::numeric_limits<int32_t>::max();
+}
+
+} // namespace
+
+//-------------------------------------------------------------------
+
+// SIMD helper functions.
+
+RUNTIME_FUNCTION(Runtime_IsSimdValue) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ return isolate->heap()->ToBoolean(args[0]->IsSimd128Value());
+}
+
+
+RUNTIME_FUNCTION(Runtime_SimdToObject) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Simd128Value, value, 0);
+ return *Object::ToObject(isolate, value).ToHandleChecked();
+}
+
+
+RUNTIME_FUNCTION(Runtime_SimdEquals) {
+ SealHandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(Simd128Value, x, 0);
+ CONVERT_ARG_CHECKED(Simd128Value, y, 1);
+ return Smi::FromInt(x->Equals(y) ? EQUAL : NOT_EQUAL);
+}
+
+
+RUNTIME_FUNCTION(Runtime_SimdSameValue) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(Simd128Value, a, 0);
+ bool result = false;
+ // args[1] is of unknown type.
+ if (args[1]->IsSimd128Value()) {
+ Simd128Value* b = Simd128Value::cast(args[1]);
+ if (a->map() == b->map()) {
+ if (a->IsFloat32x4()) {
+ result = Float32x4::cast(*a)->SameValue(Float32x4::cast(b));
+ } else {
+ result = a->BitwiseEquals(b);
+ }
+ }
+ }
+ return isolate->heap()->ToBoolean(result);
+}
+
+
+RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(Simd128Value, a, 0);
+ bool result = false;
+ // args[1] is of unknown type.
+ if (args[1]->IsSimd128Value()) {
+ Simd128Value* b = Simd128Value::cast(args[1]);
+ if (a->map() == b->map()) {
+ if (a->IsFloat32x4()) {
+ result = Float32x4::cast(*a)->SameValueZero(Float32x4::cast(b));
+ } else {
+ result = a->BitwiseEquals(b);
+ }
+ }
+ }
+ return isolate->heap()->ToBoolean(result);
+}
+
+
+//-------------------------------------------------------------------
+
+// Utility macros.
+
+#define CONVERT_SIMD_LANE_ARG_CHECKED(name, index, lanes) \
+ CONVERT_INT32_ARG_CHECKED(name, index); \
+ RUNTIME_ASSERT(name >= 0 && name < lanes);
+
+#define SIMD_UNARY_OP(type, lane_type, lane_count, op, result) \
+ static const int kLaneCount = lane_count; \
+ DCHECK(args.length() == 1); \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ lane_type lanes[kLaneCount]; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ lanes[i] = op(a->get_lane(i)); \
+ } \
+ Handle<type> result = isolate->factory()->New##type(lanes);
+
+#define SIMD_BINARY_OP(type, lane_type, lane_count, op, result) \
+ static const int kLaneCount = lane_count; \
+ DCHECK(args.length() == 2); \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_ARG_HANDLE_CHECKED(type, b, 1); \
+ lane_type lanes[kLaneCount]; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ lanes[i] = op(a->get_lane(i), b->get_lane(i)); \
+ } \
+ Handle<type> result = isolate->factory()->New##type(lanes);
+
+#define SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, op, result) \
+ static const int kLaneCount = lane_count; \
+ DCHECK(args.length() == 2); \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_ARG_HANDLE_CHECKED(type, b, 1); \
+ bool lanes[kLaneCount]; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ lanes[i] = a->get_lane(i) op b->get_lane(i); \
+ } \
+ Handle<bool_type> result = isolate->factory()->New##bool_type(lanes);
+
+//-------------------------------------------------------------------
+
+// Common functions.
+
+#define GET_NUMERIC_ARG(lane_type, name, index) \
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(a, index); \
+ name = ConvertNumber<lane_type>(a->Number());
+
+#define GET_BOOLEAN_ARG(lane_type, name, index) \
+ name = args[index]->BooleanValue();
+
+#define SIMD_ALL_TYPES(FUNCTION) \
+ FUNCTION(Float32x4, float, 4, NewNumber, GET_NUMERIC_ARG) \
+ FUNCTION(Int32x4, int32_t, 4, NewNumber, GET_NUMERIC_ARG) \
+ FUNCTION(Bool32x4, bool, 4, ToBoolean, GET_BOOLEAN_ARG) \
+ FUNCTION(Int16x8, int16_t, 8, NewNumber, GET_NUMERIC_ARG) \
+ FUNCTION(Bool16x8, bool, 8, ToBoolean, GET_BOOLEAN_ARG) \
+ FUNCTION(Int8x16, int8_t, 16, NewNumber, GET_NUMERIC_ARG) \
+ FUNCTION(Bool8x16, bool, 16, ToBoolean, GET_BOOLEAN_ARG)
+
+#define SIMD_CREATE_FUNCTION(type, lane_type, lane_count, extract, replace) \
+ RUNTIME_FUNCTION(Runtime_Create##type) { \
+ static const int kLaneCount = lane_count; \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == kLaneCount); \
+ lane_type lanes[kLaneCount]; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ replace(lane_type, lanes[i], i) \
+ } \
+ return *isolate->factory()->New##type(lanes); \
+ }
+
+#define SIMD_EXTRACT_FUNCTION(type, lane_type, lane_count, extract, replace) \
+ RUNTIME_FUNCTION(Runtime_##type##ExtractLane) { \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 2); \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, lane_count); \
+ return *isolate->factory()->extract(a->get_lane(lane)); \
+ }
+
+#define SIMD_REPLACE_FUNCTION(type, lane_type, lane_count, extract, replace) \
+ RUNTIME_FUNCTION(Runtime_##type##ReplaceLane) { \
+ static const int kLaneCount = lane_count; \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 3); \
+ CONVERT_ARG_HANDLE_CHECKED(type, simd, 0); \
+ CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, kLaneCount); \
+ lane_type lanes[kLaneCount]; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ lanes[i] = simd->get_lane(i); \
+ } \
+ replace(lane_type, lanes[lane], 2); \
+ Handle<type> result = isolate->factory()->New##type(lanes); \
+ return *result; \
+ }
+
+#define SIMD_CHECK_FUNCTION(type, lane_type, lane_count, extract, replace) \
+ RUNTIME_FUNCTION(Runtime_##type##Check) { \
+ HandleScope scope(isolate); \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ return *a; \
+ }
+
+#define SIMD_SWIZZLE_FUNCTION(type, lane_type, lane_count, extract, replace) \
+ RUNTIME_FUNCTION(Runtime_##type##Swizzle) { \
+ static const int kLaneCount = lane_count; \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 1 + kLaneCount); \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ lane_type lanes[kLaneCount]; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ CONVERT_SIMD_LANE_ARG_CHECKED(index, i + 1, kLaneCount); \
+ lanes[i] = a->get_lane(index); \
+ } \
+ Handle<type> result = isolate->factory()->New##type(lanes); \
+ return *result; \
+ }
+
+#define SIMD_SHUFFLE_FUNCTION(type, lane_type, lane_count, extract, replace) \
+ RUNTIME_FUNCTION(Runtime_##type##Shuffle) { \
+ static const int kLaneCount = lane_count; \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 2 + kLaneCount); \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_ARG_HANDLE_CHECKED(type, b, 1); \
+ lane_type lanes[kLaneCount]; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ CONVERT_SIMD_LANE_ARG_CHECKED(index, i + 2, kLaneCount * 2); \
+ lanes[i] = index < kLaneCount ? a->get_lane(index) \
+ : b->get_lane(index - kLaneCount); \
+ } \
+ Handle<type> result = isolate->factory()->New##type(lanes); \
+ return *result; \
+ }
+
+SIMD_ALL_TYPES(SIMD_CREATE_FUNCTION)
+SIMD_ALL_TYPES(SIMD_EXTRACT_FUNCTION)
+SIMD_ALL_TYPES(SIMD_REPLACE_FUNCTION)
+SIMD_ALL_TYPES(SIMD_CHECK_FUNCTION)
+SIMD_ALL_TYPES(SIMD_SWIZZLE_FUNCTION)
+SIMD_ALL_TYPES(SIMD_SHUFFLE_FUNCTION)
+
+//-------------------------------------------------------------------
+
+// Float-only functions.
+
+#define SIMD_ABS_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Abs) { \
+ HandleScope scope(isolate); \
+ SIMD_UNARY_OP(type, lane_type, lane_count, std::abs, result); \
+ return *result; \
+ }
+
+#define SIMD_SQRT_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Sqrt) { \
+ HandleScope scope(isolate); \
+ SIMD_UNARY_OP(type, lane_type, lane_count, std::sqrt, result); \
+ return *result; \
+ }
+
+#define SIMD_RECIP_APPROX_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##RecipApprox) { \
+ HandleScope scope(isolate); \
+ SIMD_UNARY_OP(type, lane_type, lane_count, RecipApprox, result); \
+ return *result; \
+ }
+
+#define SIMD_RECIP_SQRT_APPROX_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##RecipSqrtApprox) { \
+ HandleScope scope(isolate); \
+ SIMD_UNARY_OP(type, lane_type, lane_count, RecipSqrtApprox, result); \
+ return *result; \
+ }
+
+#define BINARY_DIV(a, b) (a) / (b)
+#define SIMD_DIV_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Div) { \
+ HandleScope scope(isolate); \
+ SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_DIV, result); \
+ return *result; \
+ }
+
+#define SIMD_MINNUM_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##MinNum) { \
+ HandleScope scope(isolate); \
+ SIMD_BINARY_OP(type, lane_type, lane_count, MinNumber, result); \
+ return *result; \
+ }
+
+#define SIMD_MAXNUM_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##MaxNum) { \
+ HandleScope scope(isolate); \
+ SIMD_BINARY_OP(type, lane_type, lane_count, MaxNumber, result); \
+ return *result; \
+ }
+
+SIMD_ABS_FUNCTION(Float32x4, float, 4)
+SIMD_SQRT_FUNCTION(Float32x4, float, 4)
+SIMD_RECIP_APPROX_FUNCTION(Float32x4, float, 4)
+SIMD_RECIP_SQRT_APPROX_FUNCTION(Float32x4, float, 4)
+SIMD_DIV_FUNCTION(Float32x4, float, 4)
+SIMD_MINNUM_FUNCTION(Float32x4, float, 4)
+SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
+
+//-------------------------------------------------------------------
+
+// Int-only functions.
+
+#define SIMD_INT_TYPES(FUNCTION) \
+ FUNCTION(Int32x4, int32_t, 32, 4) \
+ FUNCTION(Int16x8, int16_t, 16, 8) \
+ FUNCTION(Int8x16, int8_t, 8, 16)
+
+#define CONVERT_SHIFT_ARG_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsNumber()); \
+ int32_t signed_shift = 0; \
+ RUNTIME_ASSERT(args[index]->ToInt32(&signed_shift)); \
+ uint32_t name = bit_cast<uint32_t>(signed_shift);
+
+#define SIMD_LSL_FUNCTION(type, lane_type, lane_bits, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##ShiftLeftByScalar) { \
+ static const int kLaneCount = lane_count; \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 2); \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
+ lane_type lanes[kLaneCount] = {0}; \
+ if (shift < lane_bits) { \
+ for (int i = 0; i < kLaneCount; i++) { \
+ lanes[i] = a->get_lane(i) << shift; \
+ } \
+ } \
+ Handle<type> result = isolate->factory()->New##type(lanes); \
+ return *result; \
+ }
+
+#define SIMD_LSR_FUNCTION(type, lane_type, lane_bits, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##ShiftRightLogicalByScalar) { \
+ static const int kLaneCount = lane_count; \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 2); \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
+ lane_type lanes[kLaneCount] = {0}; \
+ if (shift < lane_bits) { \
+ for (int i = 0; i < kLaneCount; i++) { \
+ lanes[i] = static_cast<lane_type>( \
+ bit_cast<u##lane_type>(a->get_lane(i)) >> shift); \
+ } \
+ } \
+ Handle<type> result = isolate->factory()->New##type(lanes); \
+ return *result; \
+ }
+
+#define SIMD_ASR_FUNCTION(type, lane_type, lane_bits, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##ShiftRightArithmeticByScalar) { \
+ static const int kLaneCount = lane_count; \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 2); \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
+ if (shift >= lane_bits) shift = lane_bits - 1; \
+ lane_type lanes[kLaneCount]; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ int64_t shifted = static_cast<int64_t>(a->get_lane(i)) >> shift; \
+ lanes[i] = static_cast<lane_type>(shifted); \
+ } \
+ Handle<type> result = isolate->factory()->New##type(lanes); \
+ return *result; \
+ }
+
+SIMD_INT_TYPES(SIMD_LSL_FUNCTION)
+SIMD_INT_TYPES(SIMD_LSR_FUNCTION)
+SIMD_INT_TYPES(SIMD_ASR_FUNCTION)
+
+//-------------------------------------------------------------------
+
+// Bool-only functions.
+
+#define SIMD_BOOL_TYPES(FUNCTION) \
+ FUNCTION(Bool32x4, 4) \
+ FUNCTION(Bool16x8, 8) \
+ FUNCTION(Bool8x16, 16)
+
+#define SIMD_ANY_FUNCTION(type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##AnyTrue) { \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 1); \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ bool result = false; \
+ for (int i = 0; i < lane_count; i++) { \
+ if (a->get_lane(i)) { \
+ result = true; \
+ break; \
+ } \
+ } \
+ return isolate->heap()->ToBoolean(result); \
+ }
+
+#define SIMD_ALL_FUNCTION(type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##AllTrue) { \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 1); \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ bool result = true; \
+ for (int i = 0; i < lane_count; i++) { \
+ if (!a->get_lane(i)) { \
+ result = false; \
+ break; \
+ } \
+ } \
+ return isolate->heap()->ToBoolean(result); \
+ }
+
+SIMD_BOOL_TYPES(SIMD_ANY_FUNCTION)
+SIMD_BOOL_TYPES(SIMD_ALL_FUNCTION)
+
+//-------------------------------------------------------------------
+
+// Small Int-only functions.
+
+#define SIMD_SMALL_INT_TYPES(FUNCTION) \
+ FUNCTION(Int16x8, int16_t, 8) \
+ FUNCTION(Int8x16, int8_t, 16)
+
+#define SIMD_ADD_SATURATE_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##AddSaturate) { \
+ HandleScope scope(isolate); \
+ SIMD_BINARY_OP(type, lane_type, lane_count, AddSaturate, result); \
+ return *result; \
+ }
+
+#define BINARY_SUB(a, b) (a) - (b)
+#define SIMD_SUB_SATURATE_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##SubSaturate) { \
+ HandleScope scope(isolate); \
+ SIMD_BINARY_OP(type, lane_type, lane_count, SubSaturate, result); \
+ return *result; \
+ }
+
+SIMD_SMALL_INT_TYPES(SIMD_ADD_SATURATE_FUNCTION)
+SIMD_SMALL_INT_TYPES(SIMD_SUB_SATURATE_FUNCTION)
+
+//-------------------------------------------------------------------
+
+// Numeric functions.
+
+#define SIMD_NUMERIC_TYPES(FUNCTION) \
+ FUNCTION(Float32x4, float, 4) \
+ FUNCTION(Int32x4, int32_t, 4) \
+ FUNCTION(Int16x8, int16_t, 8) \
+ FUNCTION(Int8x16, int8_t, 16)
+
+#define SIMD_NEG_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Neg) { \
+ HandleScope scope(isolate); \
+ SIMD_UNARY_OP(type, lane_type, lane_count, -, result); \
+ return *result; \
+ }
+
+#define BINARY_ADD(a, b) (a) + (b)
+#define SIMD_ADD_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Add) { \
+ HandleScope scope(isolate); \
+ SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_ADD, result); \
+ return *result; \
+ }
+
+#define BINARY_SUB(a, b) (a) - (b)
+#define SIMD_SUB_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Sub) { \
+ HandleScope scope(isolate); \
+ SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_SUB, result); \
+ return *result; \
+ }
+
+#define BINARY_MUL(a, b) (a) * (b)
+#define SIMD_MUL_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Mul) { \
+ HandleScope scope(isolate); \
+ SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_MUL, result); \
+ return *result; \
+ }
+
+#define SIMD_MIN_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Min) { \
+ HandleScope scope(isolate); \
+ SIMD_BINARY_OP(type, lane_type, lane_count, Min, result); \
+ return *result; \
+ }
+
+#define SIMD_MAX_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Max) { \
+ HandleScope scope(isolate); \
+ SIMD_BINARY_OP(type, lane_type, lane_count, Max, result); \
+ return *result; \
+ }
+
+SIMD_NUMERIC_TYPES(SIMD_NEG_FUNCTION)
+SIMD_NUMERIC_TYPES(SIMD_ADD_FUNCTION)
+SIMD_NUMERIC_TYPES(SIMD_SUB_FUNCTION)
+SIMD_NUMERIC_TYPES(SIMD_MUL_FUNCTION)
+SIMD_NUMERIC_TYPES(SIMD_MIN_FUNCTION)
+SIMD_NUMERIC_TYPES(SIMD_MAX_FUNCTION)
+
+//-------------------------------------------------------------------
+
+// Relational functions.
+
+#define SIMD_RELATIONAL_TYPES(FUNCTION) \
+ FUNCTION(Float32x4, Bool32x4, 4) \
+ FUNCTION(Int32x4, Bool32x4, 4) \
+ FUNCTION(Int16x8, Bool16x8, 8) \
+ FUNCTION(Int8x16, Bool8x16, 16)
+
+#define SIMD_EQUALITY_TYPES(FUNCTION) \
+ SIMD_RELATIONAL_TYPES(FUNCTION) \
+ FUNCTION(Bool32x4, Bool32x4, 4) \
+ FUNCTION(Bool16x8, Bool16x8, 8) \
+ FUNCTION(Bool8x16, Bool8x16, 16)
+
+#define SIMD_EQUAL_FUNCTION(type, bool_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Equal) { \
+ HandleScope scope(isolate); \
+ SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, ==, result); \
+ return *result; \
+ }
+
+#define SIMD_NOT_EQUAL_FUNCTION(type, bool_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##NotEqual) { \
+ HandleScope scope(isolate); \
+ SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, !=, result); \
+ return *result; \
+ }
+
+SIMD_EQUALITY_TYPES(SIMD_EQUAL_FUNCTION)
+SIMD_EQUALITY_TYPES(SIMD_NOT_EQUAL_FUNCTION)
+
+#define SIMD_LESS_THAN_FUNCTION(type, bool_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##LessThan) { \
+ HandleScope scope(isolate); \
+ SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, <, result); \
+ return *result; \
+ }
+
+#define SIMD_LESS_THAN_OR_EQUAL_FUNCTION(type, bool_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##LessThanOrEqual) { \
+ HandleScope scope(isolate); \
+ SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, <=, result); \
+ return *result; \
+ }
+
+#define SIMD_GREATER_THAN_FUNCTION(type, bool_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##GreaterThan) { \
+ HandleScope scope(isolate); \
+ SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, >, result); \
+ return *result; \
+ }
+
+#define SIMD_GREATER_THAN_OR_EQUAL_FUNCTION(type, bool_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##GreaterThanOrEqual) { \
+ HandleScope scope(isolate); \
+ SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, >=, result); \
+ return *result; \
+ }
+
+SIMD_RELATIONAL_TYPES(SIMD_LESS_THAN_FUNCTION)
+SIMD_RELATIONAL_TYPES(SIMD_LESS_THAN_OR_EQUAL_FUNCTION)
+SIMD_RELATIONAL_TYPES(SIMD_GREATER_THAN_FUNCTION)
+SIMD_RELATIONAL_TYPES(SIMD_GREATER_THAN_OR_EQUAL_FUNCTION)
+
+//-------------------------------------------------------------------
+
+// Logical functions.
+
+#define SIMD_LOGICAL_TYPES(FUNCTION) \
+ FUNCTION(Int32x4, int32_t, 4, _INT) \
+ FUNCTION(Int16x8, int16_t, 8, _INT) \
+ FUNCTION(Int8x16, int8_t, 16, _INT) \
+ FUNCTION(Bool32x4, bool, 4, _BOOL) \
+ FUNCTION(Bool16x8, bool, 8, _BOOL) \
+ FUNCTION(Bool8x16, bool, 16, _BOOL)
+
+#define BINARY_AND_INT(a, b) (a) & (b)
+#define BINARY_AND_BOOL(a, b) (a) && (b)
+#define SIMD_AND_FUNCTION(type, lane_type, lane_count, op) \
+ RUNTIME_FUNCTION(Runtime_##type##And) { \
+ HandleScope scope(isolate); \
+ SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_AND##op, result); \
+ return *result; \
+ }
+
+#define BINARY_OR_INT(a, b) (a) | (b)
+#define BINARY_OR_BOOL(a, b) (a) || (b)
+#define SIMD_OR_FUNCTION(type, lane_type, lane_count, op) \
+ RUNTIME_FUNCTION(Runtime_##type##Or) { \
+ HandleScope scope(isolate); \
+ SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_OR##op, result); \
+ return *result; \
+ }
+
+#define BINARY_XOR_INT(a, b) (a) ^ (b)
+#define BINARY_XOR_BOOL(a, b) (a) != (b)
+#define SIMD_XOR_FUNCTION(type, lane_type, lane_count, op) \
+ RUNTIME_FUNCTION(Runtime_##type##Xor) { \
+ HandleScope scope(isolate); \
+ SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_XOR##op, result); \
+ return *result; \
+ }
+
+#define UNARY_NOT_INT ~
+#define UNARY_NOT_BOOL !
+#define SIMD_NOT_FUNCTION(type, lane_type, lane_count, op) \
+ RUNTIME_FUNCTION(Runtime_##type##Not) { \
+ HandleScope scope(isolate); \
+ SIMD_UNARY_OP(type, lane_type, lane_count, UNARY_NOT##op, result); \
+ return *result; \
+ }
+
+SIMD_LOGICAL_TYPES(SIMD_AND_FUNCTION)
+SIMD_LOGICAL_TYPES(SIMD_OR_FUNCTION)
+SIMD_LOGICAL_TYPES(SIMD_XOR_FUNCTION)
+SIMD_LOGICAL_TYPES(SIMD_NOT_FUNCTION)
+
+//-------------------------------------------------------------------
+
+// Select functions.
+
+#define SIMD_SELECT_TYPES(FUNCTION) \
+ FUNCTION(Float32x4, float, Bool32x4, 4) \
+ FUNCTION(Int32x4, int32_t, Bool32x4, 4) \
+ FUNCTION(Int16x8, int16_t, Bool16x8, 8) \
+ FUNCTION(Int8x16, int8_t, Bool8x16, 16)
+
+#define SIMD_SELECT_FUNCTION(type, lane_type, bool_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Select) { \
+ static const int kLaneCount = lane_count; \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 3); \
+ CONVERT_ARG_HANDLE_CHECKED(bool_type, mask, 0); \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 1); \
+ CONVERT_ARG_HANDLE_CHECKED(type, b, 2); \
+ lane_type lanes[kLaneCount]; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ lanes[i] = mask->get_lane(i) ? a->get_lane(i) : b->get_lane(i); \
+ } \
+ Handle<type> result = isolate->factory()->New##type(lanes); \
+ return *result; \
+ }
+
+SIMD_SELECT_TYPES(SIMD_SELECT_FUNCTION)
+
+//-------------------------------------------------------------------
+
+// Casting functions.
+
+#define SIMD_FROM_TYPES(FUNCTION) \
+ FUNCTION(Float32x4, float, 4, Int32x4, int32_t) \
+ FUNCTION(Int32x4, int32_t, 4, Float32x4, float)
+
+#define SIMD_FROM_FUNCTION(type, lane_type, lane_count, from_type, from_ctype) \
+ RUNTIME_FUNCTION(Runtime_##type##From##from_type) { \
+ static const int kLaneCount = lane_count; \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 1); \
+ CONVERT_ARG_HANDLE_CHECKED(from_type, a, 0); \
+ lane_type lanes[kLaneCount]; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ from_ctype a_value = a->get_lane(i); \
+ RUNTIME_ASSERT(CanCast(a_value)); \
+ lanes[i] = static_cast<lane_type>(a_value); \
+ } \
+ Handle<type> result = isolate->factory()->New##type(lanes); \
+ return *result; \
+ }
+
+SIMD_FROM_TYPES(SIMD_FROM_FUNCTION)
+
+#define SIMD_FROM_BITS_TYPES(FUNCTION) \
+ FUNCTION(Float32x4, float, 4, Int32x4) \
+ FUNCTION(Float32x4, float, 4, Int16x8) \
+ FUNCTION(Float32x4, float, 4, Int8x16) \
+ FUNCTION(Int32x4, int32_t, 4, Float32x4) \
+ FUNCTION(Int32x4, int32_t, 4, Int16x8) \
+ FUNCTION(Int32x4, int32_t, 4, Int8x16) \
+ FUNCTION(Int16x8, int16_t, 8, Float32x4) \
+ FUNCTION(Int16x8, int16_t, 8, Int32x4) \
+ FUNCTION(Int16x8, int16_t, 8, Int8x16) \
+ FUNCTION(Int8x16, int8_t, 16, Float32x4) \
+ FUNCTION(Int8x16, int8_t, 16, Int32x4) \
+ FUNCTION(Int8x16, int8_t, 16, Int16x8)
+
+#define SIMD_FROM_BITS_FUNCTION(type, lane_type, lane_count, from_type) \
+ RUNTIME_FUNCTION(Runtime_##type##From##from_type##Bits) { \
+ static const int kLaneCount = lane_count; \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 1); \
+ CONVERT_ARG_HANDLE_CHECKED(from_type, a, 0); \
+ lane_type lanes[kLaneCount]; \
+ a->CopyBits(lanes); \
+ Handle<type> result = isolate->factory()->New##type(lanes); \
+ return *result; \
+ }
+
+SIMD_FROM_BITS_TYPES(SIMD_FROM_BITS_FUNCTION)
+
+//-------------------------------------------------------------------
+
+// Unsigned extract functions.
+// TODO(bbudge): remove when spec changes to include unsigned int types.
+
+RUNTIME_FUNCTION(Runtime_Int16x8UnsignedExtractLane) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(Int16x8, a, 0);
+ CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, 8);
+ return *isolate->factory()->NewNumber(bit_cast<uint16_t>(a->get_lane(lane)));
+}
+
+
+RUNTIME_FUNCTION(Runtime_Int8x16UnsignedExtractLane) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(Int8x16, a, 0);
+ CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, 16);
+ return *isolate->factory()->NewNumber(bit_cast<uint8_t>(a->get_lane(lane)));
+}
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 3b9cfbf969..bb4207f202 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
-#include "src/jsregexp-inl.h"
-#include "src/jsregexp.h"
-#include "src/runtime/runtime-utils.h"
+#include "src/conversions-inl.h"
+#include "src/regexp/jsregexp-inl.h"
+#include "src/regexp/jsregexp.h"
#include "src/string-builder.h"
#include "src/string-search.h"
@@ -281,7 +281,7 @@ RUNTIME_FUNCTION(Runtime_StringLocaleCompare) {
}
-RUNTIME_FUNCTION(Runtime_SubStringRT) {
+RUNTIME_FUNCTION(Runtime_SubString) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
@@ -309,13 +309,7 @@ RUNTIME_FUNCTION(Runtime_SubStringRT) {
}
-RUNTIME_FUNCTION(Runtime_SubString) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_SubStringRT(args, isolate);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringAddRT) {
+RUNTIME_FUNCTION(Runtime_StringAdd) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
@@ -328,12 +322,6 @@ RUNTIME_FUNCTION(Runtime_StringAddRT) {
}
-RUNTIME_FUNCTION(Runtime_StringAdd) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_StringAddRT(args, isolate);
-}
-
-
RUNTIME_FUNCTION(Runtime_InternalizeString) {
HandleScope handles(isolate);
RUNTIME_ASSERT(args.length() == 1);
@@ -428,7 +416,7 @@ RUNTIME_FUNCTION(Runtime_CharFromCode) {
}
-RUNTIME_FUNCTION(Runtime_StringCompareRT) {
+RUNTIME_FUNCTION(Runtime_StringCompare) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 2);
@@ -497,12 +485,6 @@ RUNTIME_FUNCTION(Runtime_StringCompareRT) {
}
-RUNTIME_FUNCTION(Runtime_StringCompare) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_StringCompareRT(args, isolate);
-}
-
-
RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index 412ee0ae31..8f99a37c42 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
-#include "src/runtime/runtime-utils.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -51,14 +51,6 @@ RUNTIME_FUNCTION(Runtime_CreateGlobalPrivateSymbol) {
}
-RUNTIME_FUNCTION(Runtime_NewSymbolWrapper) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Symbol, symbol, 0);
- return *Object::ToObject(isolate, symbol).ToHandleChecked();
-}
-
-
RUNTIME_FUNCTION(Runtime_SymbolDescription) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 1325eeb67b..73f6478338 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
-#include "src/runtime/runtime-utils.h"
+#include "src/frames-inl.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/snapshot/natives.h"
namespace v8 {
@@ -342,7 +342,7 @@ RUNTIME_FUNCTION(Runtime_SetFlags) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(String, arg, 0);
- SmartArrayPointer<char> flags =
+ base::SmartArrayPointer<char> flags =
arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
FlagList::SetFlagsFromString(flags.get(), StrLength(flags.get()));
return isolate->heap()->undefined_value();
@@ -378,7 +378,8 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
RUNTIME_FUNCTION(Runtime_NativeScriptsCount) {
DCHECK(args.length() == 0);
return Smi::FromInt(Natives::GetBuiltinsCount() +
- ExtraNatives::GetBuiltinsCount());
+ ExtraNatives::GetBuiltinsCount() +
+ CodeStubNatives::GetBuiltinsCount());
}
@@ -479,24 +480,13 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(SloppyArgumentsElements)
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FixedTypedArrayElements)
// Properties test sitting with elements tests - not fooling anyone.
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
-#define TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, size) \
- RUNTIME_FUNCTION(Runtime_HasExternal##Type##Elements) { \
- CONVERT_ARG_CHECKED(JSObject, obj, 0); \
- return isolate->heap()->ToBoolean(obj->HasExternal##Type##Elements()); \
- }
-
-TYPED_ARRAYS(TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
-
-#undef TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
-
-
#define FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, s) \
RUNTIME_FUNCTION(Runtime_HasFixed##Type##Elements) { \
CONVERT_ARG_CHECKED(JSObject, obj, 0); \
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 4d35524703..ffa4120903 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/factory.h"
#include "src/messages.h"
+#include "src/objects-inl.h"
#include "src/runtime/runtime.h"
-#include "src/runtime/runtime-utils.h"
-
namespace v8 {
namespace internal {
@@ -159,14 +159,12 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
void Runtime::ArrayIdToTypeAndSize(int arrayId, ExternalArrayType* array_type,
- ElementsKind* external_elements_kind,
ElementsKind* fixed_elements_kind,
size_t* element_size) {
switch (arrayId) {
#define ARRAY_ID_CASE(Type, type, TYPE, ctype, size) \
case ARRAY_ID_##TYPE: \
*array_type = kExternal##Type##Array; \
- *external_elements_kind = EXTERNAL_##TYPE##_ELEMENTS; \
*fixed_elements_kind = TYPE##_ELEMENTS; \
*element_size = size; \
break;
@@ -195,11 +193,9 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
- ElementsKind external_elements_kind =
- EXTERNAL_INT8_ELEMENTS; // Bogus initialization.
ElementsKind fixed_elements_kind = INT8_ELEMENTS; // Bogus initialization.
- Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &external_elements_kind,
- &fixed_elements_kind, &element_size);
+ Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &fixed_elements_kind,
+ &element_size);
RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind);
size_t byte_offset = 0;
@@ -241,13 +237,11 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer);
holder->set_buffer(*buffer);
- Handle<ExternalArray> elements = isolate->factory()->NewExternalArray(
- static_cast<int>(length), array_type,
- static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
- Handle<Map> map =
- JSObject::GetElementsTransitionMap(holder, external_elements_kind);
- JSObject::SetMapAndElements(holder, map, elements);
- DCHECK(IsExternalArrayElementsKind(holder->map()->elements_kind()));
+ Handle<FixedTypedArrayBase> elements =
+ isolate->factory()->NewFixedTypedArrayWithExternalPointer(
+ static_cast<int>(length), array_type,
+ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
+ holder->set_elements(*elements);
} else {
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
Runtime::SetupArrayBuffer(isolate, buffer, true, NULL, byte_length,
@@ -280,11 +274,9 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
- ElementsKind external_elements_kind =
- EXTERNAL_INT8_ELEMENTS; // Bogus intialization.
ElementsKind fixed_elements_kind = INT8_ELEMENTS; // Bogus initialization.
- Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &external_elements_kind,
- &fixed_elements_kind, &element_size);
+ Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &fixed_elements_kind,
+ &element_size);
RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind);
@@ -340,12 +332,11 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
holder->set_byte_length(*byte_length_obj);
holder->set_length(*length_obj);
- Handle<ExternalArray> elements = isolate->factory()->NewExternalArray(
- static_cast<int>(length), array_type,
- static_cast<uint8_t*>(buffer->backing_store()));
- Handle<Map> map =
- JSObject::GetElementsTransitionMap(holder, external_elements_kind);
- JSObject::SetMapAndElements(holder, map, elements);
+ Handle<FixedTypedArrayBase> elements =
+ isolate->factory()->NewFixedTypedArrayWithExternalPointer(
+ static_cast<int>(length), array_type,
+ static_cast<uint8_t*>(buffer->backing_store()));
+ holder->set_elements(*elements);
if (source->IsJSTypedArray()) {
Handle<JSTypedArray> typed_array(JSTypedArray::cast(*source));
diff --git a/deps/v8/src/runtime/runtime-uri.cc b/deps/v8/src/runtime/runtime-uri.cc
index e0eba4fe4b..4f77af81cb 100644
--- a/deps/v8/src/runtime/runtime-uri.cc
+++ b/deps/v8/src/runtime/runtime-uri.cc
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
#include "src/conversions.h"
-#include "src/runtime/runtime-utils.h"
+#include "src/objects-inl.h"
#include "src/string-search.h"
#include "src/utils.h"
-
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 674f1173a7..a490327af5 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/runtime/runtime.h"
+
+#include "src/handles-inl.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index c4f74e7de1..4545426e83 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -66,6 +66,13 @@ namespace internal {
F(AtomicsIsLockFree, 1, 1)
+#define FOR_EACH_INTRINSIC_FUTEX(F) \
+ F(AtomicsFutexWait, 4, 1) \
+ F(AtomicsFutexWake, 3, 1) \
+ F(AtomicsFutexWakeOrRequeue, 5, 1) \
+ F(AtomicsFutexNumWaitersForTesting, 2, 1)
+
+
#define FOR_EACH_INTRINSIC_CLASSES(F) \
F(ThrowNonMethodError, 0, 1) \
F(ThrowUnsupportedSuperError, 0, 1) \
@@ -75,7 +82,9 @@ namespace internal {
F(ThrowIfStaticPrototype, 1, 1) \
F(ToMethod, 2, 1) \
F(HomeObjectSymbol, 0, 1) \
- F(DefineClass, 6, 1) \
+ F(DefineClass, 5, 1) \
+ F(DefineClassStrong, 5, 1) \
+ F(FinalizeClassDefinition, 2, 1) \
F(DefineClassMethod, 3, 1) \
F(ClassGetSourceCode, 1, 1) \
F(LoadFromSuper, 4, 1) \
@@ -85,8 +94,7 @@ namespace internal {
F(StoreKeyedToSuper_Strict, 4, 1) \
F(StoreKeyedToSuper_Sloppy, 4, 1) \
F(HandleStepInForDerivedConstructors, 1, 1) \
- F(DefaultConstructorCallSuper, 2, 1) \
- F(CallSuperWithSpread, 1, 1)
+ F(DefaultConstructorCallSuper, 2, 1)
#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
@@ -145,6 +153,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_DEBUG(F) \
+ F(HandleDebuggerStatement, 0, 1) \
F(DebugBreak, 0, 1) \
F(SetDebugEventListener, 2, 1) \
F(ScheduleBreak, 0, 1) \
@@ -292,6 +301,9 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTERNAL(F) \
F(CheckIsBootstrapping, 0, 1) \
+ F(ImportToRuntime, 1, 1) \
+ F(ImportExperimentalToRuntime, 1, 1) \
+ F(InstallJSBuiltins, 1, 1) \
F(Throw, 1, 1) \
F(ReThrow, 1, 1) \
F(UnwindAndFindExceptionHandler, 0, 1) \
@@ -311,9 +323,9 @@ namespace internal {
F(AllocateInTargetSpace, 2, 1) \
F(CollectStackTrace, 2, 1) \
F(RenderCallSite, 0, 1) \
- F(GetFromCacheRT, 2, 1) \
F(MessageGetStartPosition, 1, 1) \
F(MessageGetScript, 1, 1) \
+ F(ErrorToStringRT, 1, 1) \
F(FormatMessageString, 4, 1) \
F(CallSiteGetFileNameRT, 3, 1) \
F(CallSiteGetFunctionNameRT, 3, 1) \
@@ -326,13 +338,13 @@ namespace internal {
F(CallSiteIsEvalRT, 3, 1) \
F(CallSiteIsConstructorRT, 3, 1) \
F(IS_VAR, 1, 1) \
- F(GetFromCache, 2, 1) \
F(IncrementStatsCounter, 1, 1) \
F(Likely, 1, 1) \
F(Unlikely, 1, 1) \
F(HarmonyToString, 0, 1) \
F(GetTypeFeedbackVector, 1, 1) \
- F(GetCallerJSFunction, 0, 1)
+ F(GetCallerJSFunction, 0, 1) \
+ F(GetCodeStubExportsObject, 0, 1)
#define FOR_EACH_INTRINSIC_JSON(F) \
@@ -375,12 +387,11 @@ namespace internal {
F(MathExpRT, 1, 1) \
F(MathClz32, 1, 1) \
F(MathFloor, 1, 1) \
- F(MathPowSlow, 2, 1) \
+ F(MathPow, 2, 1) \
F(MathPowRT, 2, 1) \
F(RoundNumber, 1, 1) \
F(MathSqrt, 1, 1) \
F(MathFround, 1, 1) \
- F(MathPow, 2, 1) \
F(IsMinusZero, 1, 1)
@@ -393,12 +404,10 @@ namespace internal {
F(StringToNumber, 1, 1) \
F(StringParseInt, 2, 1) \
F(StringParseFloat, 1, 1) \
- F(NumberToStringRT, 1, 1) \
+ F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
F(NumberToInteger, 1, 1) \
F(NumberToIntegerMapMinusZero, 1, 1) \
- F(NumberToJSUint32, 1, 1) \
- F(NumberToJSInt32, 1, 1) \
F(NumberToSmi, 1, 1) \
F(NumberAdd, 2, 1) \
F(NumberSub, 2, 1) \
@@ -417,7 +426,6 @@ namespace internal {
F(NumberCompare, 3, 1) \
F(SmiLexicographicCompare, 2, 1) \
F(MaxSmi, 0, 1) \
- F(NumberToString, 1, 1) \
F(IsSmi, 1, 1) \
F(IsNonNegativeSmi, 1, 1) \
F(GetRootNaN, 0, 1)
@@ -438,16 +446,19 @@ namespace internal {
F(GetPropertyStrong, 2, 1) \
F(KeyedGetProperty, 2, 1) \
F(KeyedGetPropertyStrong, 2, 1) \
+ F(LoadGlobalViaContext, 1, 1) \
+ F(StoreGlobalViaContext_Sloppy, 2, 1) \
+ F(StoreGlobalViaContext_Strict, 2, 1) \
F(AddNamedProperty, 4, 1) \
F(SetProperty, 4, 1) \
F(AddElement, 3, 1) \
F(AppendElement, 2, 1) \
- F(DeleteProperty, 3, 1) \
+ F(DeleteProperty_Sloppy, 2, 1) \
+ F(DeleteProperty_Strict, 2, 1) \
F(HasOwnProperty, 2, 1) \
F(HasProperty, 2, 1) \
F(HasElement, 2, 1) \
F(IsPropertyEnumerable, 2, 1) \
- F(GetPropertyNames, 1, 1) \
F(GetPropertyNamesFast, 1, 1) \
F(GetOwnPropertyNames, 2, 1) \
F(GetOwnElementNames, 1, 1) \
@@ -456,7 +467,6 @@ namespace internal {
F(GetIndexedInterceptorElementNames, 1, 1) \
F(OwnKeys, 1, 1) \
F(ToFastProperties, 1, 1) \
- F(ToBool, 1, 1) \
F(NewStringWrapper, 1, 1) \
F(AllocateHeapNumber, 0, 1) \
F(NewObject, 2, 1) \
@@ -478,12 +488,13 @@ namespace internal {
F(MapGetInstanceType, 1, 1) \
F(ObjectEquals, 2, 1) \
F(IsObject, 1, 1) \
- F(IsUndetectableObject, 1, 1) \
F(IsSpecObject, 1, 1) \
F(IsStrong, 1, 1) \
F(ClassOf, 1, 1) \
F(DefineGetterPropertyUnchecked, 4, 1) \
F(DefineSetterPropertyUnchecked, 4, 1) \
+ F(ToObject, 1, 1) \
+ F(StrictEquals, 2, 1) \
F(IsAccessCheckNeeded, 1, 1)
@@ -516,7 +527,6 @@ namespace internal {
F(StringReplaceGlobalRegExpWithString, 4, 1) \
F(StringSplit, 3, 1) \
F(RegExpExec, 4, 1) \
- F(RegExpConstructResultRT, 3, 1) \
F(RegExpConstructResult, 3, 1) \
F(RegExpInitializeAndCompile, 3, 1) \
F(MaterializeRegExpLiteral, 4, 1) \
@@ -527,10 +537,11 @@ namespace internal {
#define FOR_EACH_INTRINSIC_SCOPES(F) \
F(ThrowConstAssignError, 0, 1) \
- F(DeclareGlobals, 3, 1) \
+ F(DeclareGlobals, 2, 1) \
F(InitializeVarGlobal, 3, 1) \
F(InitializeConstGlobal, 2, 1) \
- F(DeclareLookupSlot, 4, 1) \
+ F(DeclareLookupSlot, 2, 1) \
+ F(DeclareReadOnlyLookupSlot, 2, 1) \
F(InitializeLegacyConstLookupSlot, 3, 1) \
F(NewArguments, 1, 1) /* TODO(turbofan): Only temporary */ \
F(NewSloppyArguments, 3, 1) \
@@ -549,25 +560,195 @@ namespace internal {
F(DeclareModules, 1, 1) \
F(DeleteLookupSlot, 2, 1) \
F(StoreLookupSlot, 4, 1) \
- F(GetArgumentsProperty, 1, 1) \
F(ArgumentsLength, 0, 1) \
F(Arguments, 1, 1)
+#define FOR_EACH_INTRINSIC_SIMD(F) \
+ F(IsSimdValue, 1, 1) \
+ F(SimdToObject, 1, 1) \
+ F(SimdEquals, 2, 1) \
+ F(SimdSameValue, 2, 1) \
+ F(SimdSameValueZero, 2, 1) \
+ F(CreateFloat32x4, 4, 1) \
+ F(CreateInt32x4, 4, 1) \
+ F(CreateBool32x4, 4, 1) \
+ F(CreateInt16x8, 8, 1) \
+ F(CreateBool16x8, 8, 1) \
+ F(CreateInt8x16, 16, 1) \
+ F(CreateBool8x16, 16, 1) \
+ F(Float32x4Check, 1, 1) \
+ F(Float32x4ExtractLane, 2, 1) \
+ F(Float32x4ReplaceLane, 3, 1) \
+ F(Float32x4Abs, 1, 1) \
+ F(Float32x4Neg, 1, 1) \
+ F(Float32x4Sqrt, 1, 1) \
+ F(Float32x4RecipApprox, 1, 1) \
+ F(Float32x4RecipSqrtApprox, 1, 1) \
+ F(Float32x4Add, 2, 1) \
+ F(Float32x4Sub, 2, 1) \
+ F(Float32x4Mul, 2, 1) \
+ F(Float32x4Div, 2, 1) \
+ F(Float32x4Min, 2, 1) \
+ F(Float32x4Max, 2, 1) \
+ F(Float32x4MinNum, 2, 1) \
+ F(Float32x4MaxNum, 2, 1) \
+ F(Float32x4LessThan, 2, 1) \
+ F(Float32x4LessThanOrEqual, 2, 1) \
+ F(Float32x4GreaterThan, 2, 1) \
+ F(Float32x4GreaterThanOrEqual, 2, 1) \
+ F(Float32x4Equal, 2, 1) \
+ F(Float32x4NotEqual, 2, 1) \
+ F(Float32x4Select, 3, 1) \
+ F(Float32x4Swizzle, 5, 1) \
+ F(Float32x4Shuffle, 6, 1) \
+ F(Float32x4FromInt32x4, 1, 1) \
+ F(Float32x4FromInt32x4Bits, 1, 1) \
+ F(Float32x4FromInt16x8Bits, 1, 1) \
+ F(Float32x4FromInt8x16Bits, 1, 1) \
+ F(Int32x4Check, 1, 1) \
+ F(Int32x4ExtractLane, 2, 1) \
+ F(Int32x4ReplaceLane, 3, 1) \
+ F(Int32x4Neg, 1, 1) \
+ F(Int32x4Add, 2, 1) \
+ F(Int32x4Sub, 2, 1) \
+ F(Int32x4Mul, 2, 1) \
+ F(Int32x4Min, 2, 1) \
+ F(Int32x4Max, 2, 1) \
+ F(Int32x4And, 2, 1) \
+ F(Int32x4Or, 2, 1) \
+ F(Int32x4Xor, 2, 1) \
+ F(Int32x4Not, 1, 1) \
+ F(Int32x4ShiftLeftByScalar, 2, 1) \
+ F(Int32x4ShiftRightLogicalByScalar, 2, 1) \
+ F(Int32x4ShiftRightArithmeticByScalar, 2, 1) \
+ F(Int32x4LessThan, 2, 1) \
+ F(Int32x4LessThanOrEqual, 2, 1) \
+ F(Int32x4GreaterThan, 2, 1) \
+ F(Int32x4GreaterThanOrEqual, 2, 1) \
+ F(Int32x4Equal, 2, 1) \
+ F(Int32x4NotEqual, 2, 1) \
+ F(Int32x4Select, 3, 1) \
+ F(Int32x4Swizzle, 5, 1) \
+ F(Int32x4Shuffle, 6, 1) \
+ F(Int32x4FromFloat32x4, 1, 1) \
+ F(Int32x4FromFloat32x4Bits, 1, 1) \
+ F(Int32x4FromInt16x8Bits, 1, 1) \
+ F(Int32x4FromInt8x16Bits, 1, 1) \
+ F(Bool32x4Check, 1, 1) \
+ F(Bool32x4ExtractLane, 2, 1) \
+ F(Bool32x4ReplaceLane, 3, 1) \
+ F(Bool32x4And, 2, 1) \
+ F(Bool32x4Or, 2, 1) \
+ F(Bool32x4Xor, 2, 1) \
+ F(Bool32x4Not, 1, 1) \
+ F(Bool32x4AnyTrue, 1, 1) \
+ F(Bool32x4AllTrue, 1, 1) \
+ F(Bool32x4Equal, 2, 1) \
+ F(Bool32x4NotEqual, 2, 1) \
+ F(Bool32x4Swizzle, 5, 1) \
+ F(Bool32x4Shuffle, 6, 1) \
+ F(Int16x8Check, 1, 1) \
+ F(Int16x8ExtractLane, 2, 1) \
+ F(Int16x8UnsignedExtractLane, 2, 1) \
+ F(Int16x8ReplaceLane, 3, 1) \
+ F(Int16x8Neg, 1, 1) \
+ F(Int16x8Add, 2, 1) \
+ F(Int16x8AddSaturate, 2, 1) \
+ F(Int16x8Sub, 2, 1) \
+ F(Int16x8SubSaturate, 2, 1) \
+ F(Int16x8Mul, 2, 1) \
+ F(Int16x8Min, 2, 1) \
+ F(Int16x8Max, 2, 1) \
+ F(Int16x8And, 2, 1) \
+ F(Int16x8Or, 2, 1) \
+ F(Int16x8Xor, 2, 1) \
+ F(Int16x8Not, 1, 1) \
+ F(Int16x8ShiftLeftByScalar, 2, 1) \
+ F(Int16x8ShiftRightLogicalByScalar, 2, 1) \
+ F(Int16x8ShiftRightArithmeticByScalar, 2, 1) \
+ F(Int16x8LessThan, 2, 1) \
+ F(Int16x8LessThanOrEqual, 2, 1) \
+ F(Int16x8GreaterThan, 2, 1) \
+ F(Int16x8GreaterThanOrEqual, 2, 1) \
+ F(Int16x8Equal, 2, 1) \
+ F(Int16x8NotEqual, 2, 1) \
+ F(Int16x8Select, 3, 1) \
+ F(Int16x8Swizzle, 9, 1) \
+ F(Int16x8Shuffle, 10, 1) \
+ F(Int16x8FromFloat32x4Bits, 1, 1) \
+ F(Int16x8FromInt32x4Bits, 1, 1) \
+ F(Int16x8FromInt8x16Bits, 1, 1) \
+ F(Bool16x8Check, 1, 1) \
+ F(Bool16x8ExtractLane, 2, 1) \
+ F(Bool16x8ReplaceLane, 3, 1) \
+ F(Bool16x8And, 2, 1) \
+ F(Bool16x8Or, 2, 1) \
+ F(Bool16x8Xor, 2, 1) \
+ F(Bool16x8Not, 1, 1) \
+ F(Bool16x8AnyTrue, 1, 1) \
+ F(Bool16x8AllTrue, 1, 1) \
+ F(Bool16x8Equal, 2, 1) \
+ F(Bool16x8NotEqual, 2, 1) \
+ F(Bool16x8Swizzle, 9, 1) \
+ F(Bool16x8Shuffle, 10, 1) \
+ F(Int8x16Check, 1, 1) \
+ F(Int8x16ExtractLane, 2, 1) \
+ F(Int8x16UnsignedExtractLane, 2, 1) \
+ F(Int8x16ReplaceLane, 3, 1) \
+ F(Int8x16Neg, 1, 1) \
+ F(Int8x16Add, 2, 1) \
+ F(Int8x16AddSaturate, 2, 1) \
+ F(Int8x16Sub, 2, 1) \
+ F(Int8x16SubSaturate, 2, 1) \
+ F(Int8x16Mul, 2, 1) \
+ F(Int8x16Min, 2, 1) \
+ F(Int8x16Max, 2, 1) \
+ F(Int8x16And, 2, 1) \
+ F(Int8x16Or, 2, 1) \
+ F(Int8x16Xor, 2, 1) \
+ F(Int8x16Not, 1, 1) \
+ F(Int8x16ShiftLeftByScalar, 2, 1) \
+ F(Int8x16ShiftRightLogicalByScalar, 2, 1) \
+ F(Int8x16ShiftRightArithmeticByScalar, 2, 1) \
+ F(Int8x16LessThan, 2, 1) \
+ F(Int8x16LessThanOrEqual, 2, 1) \
+ F(Int8x16GreaterThan, 2, 1) \
+ F(Int8x16GreaterThanOrEqual, 2, 1) \
+ F(Int8x16Equal, 2, 1) \
+ F(Int8x16NotEqual, 2, 1) \
+ F(Int8x16Select, 3, 1) \
+ F(Int8x16Swizzle, 17, 1) \
+ F(Int8x16Shuffle, 18, 1) \
+ F(Int8x16FromFloat32x4Bits, 1, 1) \
+ F(Int8x16FromInt32x4Bits, 1, 1) \
+ F(Int8x16FromInt16x8Bits, 1, 1) \
+ F(Bool8x16Check, 1, 1) \
+ F(Bool8x16ExtractLane, 2, 1) \
+ F(Bool8x16ReplaceLane, 3, 1) \
+ F(Bool8x16And, 2, 1) \
+ F(Bool8x16Or, 2, 1) \
+ F(Bool8x16Xor, 2, 1) \
+ F(Bool8x16Not, 1, 1) \
+ F(Bool8x16AnyTrue, 1, 1) \
+ F(Bool8x16AllTrue, 1, 1) \
+ F(Bool8x16Equal, 2, 1) \
+ F(Bool8x16NotEqual, 2, 1) \
+ F(Bool8x16Swizzle, 17, 1) \
+ F(Bool8x16Shuffle, 18, 1)
+
+
#define FOR_EACH_INTRINSIC_STRINGS(F) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringIndexOf, 3, 1) \
F(StringLastIndexOf, 3, 1) \
F(StringLocaleCompare, 2, 1) \
- F(SubStringRT, 3, 1) \
F(SubString, 3, 1) \
- F(StringAddRT, 2, 1) \
F(StringAdd, 2, 1) \
F(InternalizeString, 1, 1) \
F(StringMatch, 3, 1) \
F(StringCharCodeAtRT, 2, 1) \
F(CharFromCode, 1, 1) \
- F(StringCompareRT, 2, 1) \
F(StringCompare, 2, 1) \
F(StringBuilderConcat, 3, 1) \
F(StringBuilderJoin, 3, 1) \
@@ -596,7 +777,6 @@ namespace internal {
F(CreateSymbol, 1, 1) \
F(CreatePrivateSymbol, 1, 1) \
F(CreateGlobalPrivateSymbol, 1, 1) \
- F(NewSymbolWrapper, 1, 1) \
F(SymbolDescription, 1, 1) \
F(SymbolRegistry, 0, 1) \
F(SymbolIsPrivate, 1, 1)
@@ -637,17 +817,8 @@ namespace internal {
F(HasFastHoleyElements, 1, 1) \
F(HasDictionaryElements, 1, 1) \
F(HasSloppyArgumentsElements, 1, 1) \
- F(HasExternalArrayElements, 1, 1) \
+ F(HasFixedTypedArrayElements, 1, 1) \
F(HasFastProperties, 1, 1) \
- F(HasExternalUint8Elements, 1, 1) \
- F(HasExternalInt8Elements, 1, 1) \
- F(HasExternalUint16Elements, 1, 1) \
- F(HasExternalInt16Elements, 1, 1) \
- F(HasExternalUint32Elements, 1, 1) \
- F(HasExternalInt32Elements, 1, 1) \
- F(HasExternalFloat32Elements, 1, 1) \
- F(HasExternalFloat64Elements, 1, 1) \
- F(HasExternalUint8ClampedElements, 1, 1) \
F(HasFixedUint8Elements, 1, 1) \
F(HasFixedInt8Elements, 1, 1) \
F(HasFixedUint16Elements, 1, 1) \
@@ -706,7 +877,37 @@ namespace internal {
F(LoadLookupSlotNoReferenceError, 2, 2)
+// Most intrinsics are implemented in the runtime/ directory, but ICs are
+// implemented in ic.cc for now.
+#define FOR_EACH_INTRINSIC_IC(F) \
+ F(LoadIC_Miss, 3, 1) \
+ F(KeyedLoadIC_Miss, 3, 1) \
+ F(CallIC_Miss, 3, 1) \
+ F(CallIC_Customization_Miss, 3, 1) \
+ F(StoreIC_Miss, 3, 1) \
+ F(StoreIC_Slow, 3, 1) \
+ F(KeyedStoreIC_Miss, 3, 1) \
+ F(KeyedStoreIC_Slow, 3, 1) \
+ F(StoreCallbackProperty, 5, 1) \
+ F(LoadPropertyWithInterceptorOnly, 3, 1) \
+ F(LoadPropertyWithInterceptor, 3, 1) \
+ F(LoadElementWithInterceptor, 2, 1) \
+ F(StorePropertyWithInterceptor, 3, 1) \
+ F(CompareIC_Miss, 3, 1) \
+ F(BinaryOpIC_Miss, 2, 1) \
+ F(CompareNilIC_Miss, 1, 1) \
+ F(Unreachable, 0, 1) \
+ F(ToBooleanIC_Miss, 1, 1) \
+ F(KeyedLoadIC_MissFromStubFailure, 4, 1) \
+ F(KeyedStoreIC_MissFromStubFailure, 3, 1) \
+ F(StoreIC_MissFromStubFailure, 3, 1) \
+ F(ElementsTransitionAndStoreIC_Miss, 4, 1) \
+ F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
+ F(LoadIC_MissFromStubFailure, 0, 1)
+
+
#define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
+ FOR_EACH_INTRINSIC_IC(F) \
FOR_EACH_INTRINSIC_ARRAY(F) \
FOR_EACH_INTRINSIC_ATOMICS(F) \
FOR_EACH_INTRINSIC_CLASSES(F) \
@@ -716,6 +917,7 @@ namespace internal {
FOR_EACH_INTRINSIC_DEBUG(F) \
FOR_EACH_INTRINSIC_FORIN(F) \
FOR_EACH_INTRINSIC_FUNCTION(F) \
+ FOR_EACH_INTRINSIC_FUTEX(F) \
FOR_EACH_INTRINSIC_GENERATOR(F) \
FOR_EACH_INTRINSIC_I18N(F) \
FOR_EACH_INTRINSIC_INTERNAL(F) \
@@ -729,6 +931,7 @@ namespace internal {
FOR_EACH_INTRINSIC_PROXY(F) \
FOR_EACH_INTRINSIC_REGEXP(F) \
FOR_EACH_INTRINSIC_SCOPES(F) \
+ FOR_EACH_INTRINSIC_SIMD(F) \
FOR_EACH_INTRINSIC_STRINGS(F) \
FOR_EACH_INTRINSIC_SYMBOL(F) \
FOR_EACH_INTRINSIC_TEST(F) \
@@ -742,6 +945,12 @@ namespace internal {
FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
+#define F(name, nargs, ressize) \
+ Object* Runtime_##name(int args_length, Object** args_object, \
+ Isolate* isolate);
+FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
+#undef F
+
//---------------------------------------------------------------------------
// Runtime provides access to all C++ runtime functions.
@@ -815,15 +1024,6 @@ class Runtime : public AllStatic {
// Get the intrinsic function with the given function entry address.
static const Function* FunctionForEntry(Address ref);
- // TODO(1240886): Some of the following methods are *not* handle safe, but
- // accept handle arguments. This seems fragile.
-
- // Support getting the characters in a string using [] notation as
- // in Firefox/SpiderMonkey, Safari and Opera.
- MUST_USE_RESULT static MaybeHandle<Object> GetElementOrCharAt(
- Isolate* isolate, Handle<Object> object, uint32_t index,
- LanguageMode language_mode = SLOPPY);
-
MUST_USE_RESULT static MaybeHandle<Object> DeleteObjectProperty(
Isolate* isolate, Handle<JSReceiver> receiver, Handle<Object> key,
LanguageMode language_mode);
@@ -859,8 +1059,6 @@ class Runtime : public AllStatic {
static void NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer);
- static int FindIndexedNonNativeFrame(JavaScriptFrameIterator* it, int index);
-
enum TypedArrayId {
// arrayIds below should be synchromized with typedarray.js natives.
ARRAY_ID_UINT8 = 1,
@@ -877,7 +1075,6 @@ class Runtime : public AllStatic {
};
static void ArrayIdToTypeAndSize(int array_id, ExternalArrayType* type,
- ElementsKind* external_elements_kind,
ElementsKind* fixed_elements_kind,
size_t* element_size);
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index 5214b7b8d1..4c1c02ac0d 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/safepoint-table.h"
#include "src/deoptimizer.h"
#include "src/disasm.h"
+#include "src/frames-inl.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
diff --git a/deps/v8/src/sampler.h b/deps/v8/src/sampler.h
index 120260dbf5..bfdf961229 100644
--- a/deps/v8/src/sampler.h
+++ b/deps/v8/src/sampler.h
@@ -8,6 +8,7 @@
#include "include/v8.h"
#include "src/base/atomicops.h"
+#include "src/base/platform/time.h"
#include "src/frames.h"
#include "src/globals.h"
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index ad7c7d983c..c6c0a8d6a2 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -40,9 +40,7 @@ void Utf16CharacterStream::ResetToBookmark() { UNREACHABLE(); }
Scanner::Scanner(UnicodeCache* unicode_cache)
: unicode_cache_(unicode_cache),
bookmark_c0_(kNoBookmark),
- octal_pos_(Location::invalid()),
- harmony_modules_(false),
- harmony_unicode_(false) {
+ octal_pos_(Location::invalid()) {
bookmark_current_.literal_chars = &bookmark_current_literal_;
bookmark_current_.raw_literal_chars = &bookmark_current_raw_literal_;
bookmark_next_.literal_chars = &bookmark_next_literal_;
@@ -1076,10 +1074,9 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() {
template <bool capture_raw>
uc32 Scanner::ScanUnicodeEscape() {
- // Accept both \uxxxx and \u{xxxxxx} (if harmony unicode escapes are
- // allowed). In the latter case, the number of hex digits between { } is
- // arbitrary. \ and u have already been read.
- if (c0_ == '{' && HarmonyUnicode()) {
+ // Accept both \uxxxx and \u{xxxxxx}. In the latter case, the number of
+ // hex digits between { } is arbitrary. \ and u have already been read.
+ if (c0_ == '{') {
Advance<capture_raw>();
uc32 cp = ScanUnlimitedLengthHexNumber<capture_raw>(0x10ffff);
if (cp < 0) {
@@ -1098,74 +1095,71 @@ uc32 Scanner::ScanUnicodeEscape() {
// ----------------------------------------------------------------------------
// Keyword Matcher
-#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
- KEYWORD_GROUP('b') \
- KEYWORD("break", Token::BREAK) \
- KEYWORD_GROUP('c') \
- KEYWORD("case", Token::CASE) \
- KEYWORD("catch", Token::CATCH) \
- KEYWORD("class", Token::CLASS) \
- KEYWORD("const", Token::CONST) \
- KEYWORD("continue", Token::CONTINUE) \
- KEYWORD_GROUP('d') \
- KEYWORD("debugger", Token::DEBUGGER) \
- KEYWORD("default", Token::DEFAULT) \
- KEYWORD("delete", Token::DELETE) \
- KEYWORD("do", Token::DO) \
- KEYWORD_GROUP('e') \
- KEYWORD("else", Token::ELSE) \
- KEYWORD("enum", Token::FUTURE_RESERVED_WORD) \
- KEYWORD("export", \
- harmony_modules ? Token::EXPORT : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("extends", Token::EXTENDS) \
- KEYWORD_GROUP('f') \
- KEYWORD("false", Token::FALSE_LITERAL) \
- KEYWORD("finally", Token::FINALLY) \
- KEYWORD("for", Token::FOR) \
- KEYWORD("function", Token::FUNCTION) \
- KEYWORD_GROUP('i') \
- KEYWORD("if", Token::IF) \
- KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("import", \
- harmony_modules ? Token::IMPORT : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("in", Token::IN) \
- KEYWORD("instanceof", Token::INSTANCEOF) \
- KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('l') \
- KEYWORD("let", Token::LET) \
- KEYWORD_GROUP('n') \
- KEYWORD("new", Token::NEW) \
- KEYWORD("null", Token::NULL_LITERAL) \
- KEYWORD_GROUP('p') \
- KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('r') \
- KEYWORD("return", Token::RETURN) \
- KEYWORD_GROUP('s') \
- KEYWORD("static", Token::STATIC) \
- KEYWORD("super", Token::SUPER) \
- KEYWORD("switch", Token::SWITCH) \
- KEYWORD_GROUP('t') \
- KEYWORD("this", Token::THIS) \
- KEYWORD("throw", Token::THROW) \
- KEYWORD("true", Token::TRUE_LITERAL) \
- KEYWORD("try", Token::TRY) \
- KEYWORD("typeof", Token::TYPEOF) \
- KEYWORD_GROUP('v') \
- KEYWORD("var", Token::VAR) \
- KEYWORD("void", Token::VOID) \
- KEYWORD_GROUP('w') \
- KEYWORD("while", Token::WHILE) \
- KEYWORD("with", Token::WITH) \
- KEYWORD_GROUP('y') \
+#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
+ KEYWORD_GROUP('b') \
+ KEYWORD("break", Token::BREAK) \
+ KEYWORD_GROUP('c') \
+ KEYWORD("case", Token::CASE) \
+ KEYWORD("catch", Token::CATCH) \
+ KEYWORD("class", Token::CLASS) \
+ KEYWORD("const", Token::CONST) \
+ KEYWORD("continue", Token::CONTINUE) \
+ KEYWORD_GROUP('d') \
+ KEYWORD("debugger", Token::DEBUGGER) \
+ KEYWORD("default", Token::DEFAULT) \
+ KEYWORD("delete", Token::DELETE) \
+ KEYWORD("do", Token::DO) \
+ KEYWORD_GROUP('e') \
+ KEYWORD("else", Token::ELSE) \
+ KEYWORD("enum", Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("export", Token::EXPORT) \
+ KEYWORD("extends", Token::EXTENDS) \
+ KEYWORD_GROUP('f') \
+ KEYWORD("false", Token::FALSE_LITERAL) \
+ KEYWORD("finally", Token::FINALLY) \
+ KEYWORD("for", Token::FOR) \
+ KEYWORD("function", Token::FUNCTION) \
+ KEYWORD_GROUP('i') \
+ KEYWORD("if", Token::IF) \
+ KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("import", Token::IMPORT) \
+ KEYWORD("in", Token::IN) \
+ KEYWORD("instanceof", Token::INSTANCEOF) \
+ KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD_GROUP('l') \
+ KEYWORD("let", Token::LET) \
+ KEYWORD_GROUP('n') \
+ KEYWORD("new", Token::NEW) \
+ KEYWORD("null", Token::NULL_LITERAL) \
+ KEYWORD_GROUP('p') \
+ KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD_GROUP('r') \
+ KEYWORD("return", Token::RETURN) \
+ KEYWORD_GROUP('s') \
+ KEYWORD("static", Token::STATIC) \
+ KEYWORD("super", Token::SUPER) \
+ KEYWORD("switch", Token::SWITCH) \
+ KEYWORD_GROUP('t') \
+ KEYWORD("this", Token::THIS) \
+ KEYWORD("throw", Token::THROW) \
+ KEYWORD("true", Token::TRUE_LITERAL) \
+ KEYWORD("try", Token::TRY) \
+ KEYWORD("typeof", Token::TYPEOF) \
+ KEYWORD_GROUP('v') \
+ KEYWORD("var", Token::VAR) \
+ KEYWORD("void", Token::VOID) \
+ KEYWORD_GROUP('w') \
+ KEYWORD("while", Token::WHILE) \
+ KEYWORD("with", Token::WITH) \
+ KEYWORD_GROUP('y') \
KEYWORD("yield", Token::YIELD)
static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
- int input_length,
- bool harmony_modules) {
+ int input_length) {
DCHECK(input_length >= 1);
const int kMinLength = 2;
const int kMaxLength = 10;
@@ -1212,8 +1206,7 @@ bool Scanner::IdentifierIsFutureStrictReserved(
return true;
}
return Token::FUTURE_STRICT_RESERVED_WORD ==
- KeywordOrIdentifierToken(string->raw_data(), string->length(),
- harmony_modules_);
+ KeywordOrIdentifierToken(string->raw_data(), string->length());
}
@@ -1246,8 +1239,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
// Only a-z+: could be a keyword or identifier.
literal.Complete();
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
- return KeywordOrIdentifierToken(chars.start(), chars.length(),
- harmony_modules_);
+ return KeywordOrIdentifierToken(chars.start(), chars.length());
}
HandleLeadSurrogate();
@@ -1297,8 +1289,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
if (next_.literal_chars->is_one_byte()) {
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
- return KeywordOrIdentifierToken(chars.start(), chars.length(),
- harmony_modules_);
+ return KeywordOrIdentifierToken(chars.start(), chars.length());
}
return Token::IDENTIFIER;
}
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index c842f987b6..92588905ad 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -14,7 +14,7 @@
#include "src/hashmap.h"
#include "src/list.h"
#include "src/token.h"
-#include "src/unicode-inl.h"
+#include "src/unicode.h"
#include "src/unicode-decoder.h"
#include "src/utils.h"
@@ -478,16 +478,6 @@ class Scanner {
// tokens, which is what it is used for.
void SeekForward(int pos);
- bool HarmonyModules() const {
- return harmony_modules_;
- }
- void SetHarmonyModules(bool modules) {
- harmony_modules_ = modules;
- }
-
- bool HarmonyUnicode() const { return harmony_unicode_; }
- void SetHarmonyUnicode(bool unicode) { harmony_unicode_ = unicode; }
-
// Returns true if there was a line terminator before the peek'ed token,
// possibly inside a multi-line comment.
bool HasAnyLineTerminatorBeforeNext() const {
@@ -797,10 +787,6 @@ class Scanner {
// Whether there is a multi-line comment that contains a
// line-terminator after the current token, and before the next.
bool has_multiline_comment_before_next_;
- // Whether we scan 'module', 'import', 'export' as keywords.
- bool harmony_modules_;
- // Whether we allow \u{xxxxx}.
- bool harmony_unicode_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index 4ed22b19dc..e53f36d274 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -34,9 +34,6 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
DCHECK_EQ(scope->ContextLocalCount(), context_local_count);
DCHECK_EQ(scope->ContextGlobalCount(), context_global_count);
- bool simple_parameter_list =
- scope->is_function_scope() ? scope->is_simple_parameter_list() : true;
-
// Determine use and location of the "this" binding if it is present.
VariableAllocationInfo receiver_info;
if (scope->has_this_declaration()) {
@@ -85,6 +82,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
Factory* factory = isolate->factory();
Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
+ bool has_simple_parameters =
+ scope->is_function_scope() && scope->has_simple_parameters();
+
// Encode the flags.
int flags = ScopeTypeField::encode(scope->scope_type()) |
CallsEvalField::encode(scope->calls_eval()) |
@@ -94,7 +94,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
FunctionVariableMode::encode(function_variable_mode) |
AsmModuleField::encode(scope->asm_module()) |
AsmFunctionField::encode(scope->asm_function()) |
- IsSimpleParameterListField::encode(simple_parameter_list) |
+ HasSimpleParametersField::encode(has_simple_parameters) |
FunctionKindField::encode(scope->function_kind());
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
@@ -224,7 +224,7 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
const int context_local_count = 1;
const int context_global_count = 0;
const int strong_mode_free_variable_count = 0;
- const bool simple_parameter_list = true;
+ const bool has_simple_parameters = true;
const VariableAllocationInfo receiver_info = CONTEXT;
const VariableAllocationInfo function_name_info = NONE;
const VariableMode function_variable_mode = VAR;
@@ -248,7 +248,7 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
FunctionVariableField::encode(function_name_info) |
FunctionVariableMode::encode(function_variable_mode) |
AsmModuleField::encode(false) | AsmFunctionField::encode(false) |
- IsSimpleParameterListField::encode(simple_parameter_list) |
+ HasSimpleParametersField::encode(has_simple_parameters) |
FunctionKindField::encode(FunctionKind::kNormalFunction);
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
@@ -340,7 +340,7 @@ int ScopeInfo::ContextLength() {
scope_type() == MODULE_SCOPE;
if (has_context) {
- return Context::MIN_CONTEXT_SLOTS + context_locals + 2 * context_globals +
+ return Context::MIN_CONTEXT_SLOTS + context_locals + context_globals +
(function_name_context_slot ? 1 : 0);
}
}
@@ -553,7 +553,7 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
var -= scope_info->ContextLocalCount();
*location = VariableLocation::GLOBAL;
result = Context::MIN_CONTEXT_SLOTS +
- scope_info->ContextLocalCount() + 2 * var;
+ scope_info->ContextLocalCount() + var;
}
context_slot_cache->Update(scope_info, name, *mode, *location,
@@ -564,7 +564,7 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
}
// Cache as not found. Mode, location, init flag and maybe assigned flag
// don't matter.
- context_slot_cache->Update(scope_info, name, INTERNAL,
+ context_slot_cache->Update(scope_info, name, TEMPORARY,
VariableLocation::CONTEXT, kNeedsInitialization,
kNotAssigned, -1);
}
@@ -572,6 +572,14 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
}
+String* ScopeInfo::ContextSlotName(int slot_index) {
+ int const var = slot_index - Context::MIN_CONTEXT_SLOTS;
+ DCHECK_LE(0, var);
+ DCHECK_LT(var, ContextLocalCount() + ContextGlobalCount());
+ return ContextLocalName(var);
+}
+
+
int ScopeInfo::ParameterIndex(String* name) {
DCHECK(name->IsInternalizedString());
if (length() > 0) {
@@ -618,30 +626,6 @@ FunctionKind ScopeInfo::function_kind() {
}
-void ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- Handle<JSObject> scope_object) {
- Isolate* isolate = scope_info->GetIsolate();
- int local_count = scope_info->ContextLocalCount();
- if (local_count == 0) return;
- // Fill all context locals to the context extension.
- int first_context_var = scope_info->StackLocalCount();
- int start = scope_info->ContextLocalNameEntriesIndex();
- for (int i = 0; i < local_count; ++i) {
- if (scope_info->LocalIsSynthetic(first_context_var + i)) continue;
- int context_index = Context::MIN_CONTEXT_SLOTS + i;
- Handle<Object> value = Handle<Object>(context->get(context_index), isolate);
- // Reflect variables under TDZ as undefined in scope object.
- if (value->IsTheHole()) continue;
- // This should always succeed.
- // TODO(verwaest): Use AddDataProperty instead.
- JSObject::SetOwnPropertyIgnoreAttributes(
- scope_object, handle(String::cast(scope_info->get(i + start))), value,
- ::NONE).Check();
- }
-}
-
-
int ScopeInfo::ParameterEntriesIndex() {
DCHECK(length() > 0);
return kVariablePartIndex;
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index f9eef9ab21..8f821461d1 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -2,21 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/scopes.h"
#include "src/accessors.h"
#include "src/bootstrapper.h"
#include "src/messages.h"
#include "src/parser.h"
#include "src/scopeinfo.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
-// TODO(ishell): remove this once compiler support is landed.
-bool enable_context_globals = false;
-
// ----------------------------------------------------------------------------
// Implementation of LocalsMap
//
@@ -77,7 +73,6 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
AstValueFactory* ast_value_factory, FunctionKind function_kind)
: inner_scopes_(4, zone),
variables_(zone),
- internals_(4, zone),
temps_(4, zone),
params_(4, zone),
unresolved_(16, zone),
@@ -100,7 +95,6 @@ Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
Handle<ScopeInfo> scope_info, AstValueFactory* value_factory)
: inner_scopes_(4, zone),
variables_(zone),
- internals_(4, zone),
temps_(4, zone),
params_(4, zone),
unresolved_(16, zone),
@@ -126,7 +120,6 @@ Scope::Scope(Zone* zone, Scope* inner_scope,
AstValueFactory* value_factory)
: inner_scopes_(1, zone),
variables_(zone),
- internals_(0, zone),
temps_(0, zone),
params_(0, zone),
unresolved_(0, zone),
@@ -154,6 +147,9 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
FunctionKind function_kind) {
outer_scope_ = outer_scope;
scope_type_ = scope_type;
+ is_declaration_scope_ =
+ is_eval_scope() || is_function_scope() ||
+ is_module_scope() || is_script_scope();
function_kind_ = function_kind;
scope_name_ = ast_value_factory_->empty_string();
dynamics_ = nullptr;
@@ -183,7 +179,8 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
num_heap_slots_ = 0;
num_global_slots_ = 0;
num_modules_ = 0;
- module_var_ = NULL,
+ module_var_ = NULL;
+ has_simple_parameters_ = true;
rest_parameter_ = NULL;
rest_index_ = -1;
scope_info_ = scope_info;
@@ -261,9 +258,9 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
bool Scope::Analyze(ParseInfo* info) {
- DCHECK(info->function() != NULL);
+ DCHECK(info->literal() != NULL);
DCHECK(info->scope() == NULL);
- Scope* scope = info->function()->scope();
+ Scope* scope = info->literal()->scope();
Scope* top = scope;
// Traverse the scope tree up to the first unresolved scope or the global
@@ -344,7 +341,6 @@ void Scope::Initialize() {
Scope* Scope::FinalizeBlockScope() {
DCHECK(is_block_scope());
- DCHECK(internals_.is_empty());
DCHECK(temps_.is_empty());
DCHECK(params_.is_empty());
@@ -469,21 +465,21 @@ Variable* Scope::DeclareParameter(const AstRawString* name, VariableMode mode,
bool is_rest, bool* is_duplicate) {
DCHECK(!already_resolved());
DCHECK(is_function_scope());
-
Variable* var;
- if (!name->IsEmpty()) {
+ if (mode == TEMPORARY) {
+ var = NewTemporary(name);
+ has_simple_parameters_ = false;
+ } else {
var = variables_.Declare(this, name, mode, Variable::NORMAL,
kCreatedInitialized);
// TODO(wingo): Avoid O(n^2) check.
*is_duplicate = IsDeclaredParameter(name);
- } else {
- var = new (zone())
- Variable(this, name, TEMPORARY, Variable::NORMAL, kCreatedInitialized);
}
if (is_rest) {
DCHECK_NULL(rest_parameter_);
rest_parameter_ = var;
rest_index_ = num_parameters();
+ has_simple_parameters_ = false;
}
params_.Add(var, zone());
return var;
@@ -496,8 +492,8 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
int declaration_group_start) {
DCHECK(!already_resolved());
// This function handles VAR, LET, and CONST modes. DYNAMIC variables are
- // introduces during variable allocation, INTERNAL variables are allocated
- // explicitly, and TEMPORARY variables are allocated via NewTemporary().
+ // introduces during variable allocation, and TEMPORARY variables are
+ // allocated via NewTemporary().
DCHECK(IsDeclaredVariableMode(mode));
++num_var_or_const_;
return variables_.Declare(this, name, mode, kind, init_flag,
@@ -527,26 +523,15 @@ void Scope::RemoveUnresolved(VariableProxy* var) {
}
-Variable* Scope::NewInternal(const AstRawString* name) {
- DCHECK(!already_resolved());
- Variable* var = new(zone()) Variable(this,
- name,
- INTERNAL,
- Variable::NORMAL,
- kCreatedInitialized);
- internals_.Add(var, zone());
- return var;
-}
-
-
Variable* Scope::NewTemporary(const AstRawString* name) {
DCHECK(!already_resolved());
- Variable* var = new(zone()) Variable(this,
+ Scope* scope = this->ClosureScope();
+ Variable* var = new(zone()) Variable(scope,
name,
TEMPORARY,
Variable::NORMAL,
kCreatedInitialized);
- temps_.Add(var, zone());
+ scope->temps_.Add(var, zone());
return var;
}
@@ -575,12 +560,21 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
int length = decls_.length();
for (int i = 0; i < length; i++) {
Declaration* decl = decls_[i];
- if (decl->mode() != VAR) continue;
+ if (decl->mode() != VAR && !is_block_scope()) continue;
const AstRawString* name = decl->proxy()->raw_name();
// Iterate through all scopes until and including the declaration scope.
+ // If the declaration scope is a (declaration) block scope, also continue
+ // (that is to handle the special inner scope of functions with
+ // destructuring parameters, which may not shadow any variables from
+ // the surrounding function scope).
Scope* previous = NULL;
Scope* current = decl->scope();
+ // Lexical vs lexical conflicts within the same scope have already been
+ // captured in Parser::Declare. The only conflicts we still need to check
+ // are lexical vs VAR, or any declarations within a declaration block scope
+ // vs lexical declarations in its surrounding (function) scope.
+ if (decl->mode() != VAR) current = current->outer_scope_;
do {
// There is a conflict if there exists a non-VAR binding.
Variable* other_var = current->variables_.Lookup(name);
@@ -589,7 +583,7 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
}
previous = current;
current = current->outer_scope_;
- } while (!previous->is_declaration_scope());
+ } while (!previous->is_declaration_scope() || previous->is_block_scope());
}
return NULL;
}
@@ -618,15 +612,6 @@ void Scope::CollectStackAndContextLocals(
DCHECK(context_locals != NULL);
DCHECK(context_globals != NULL);
- // Collect internals which are always allocated on the heap.
- for (int i = 0; i < internals_.length(); i++) {
- Variable* var = internals_[i];
- if (var->is_used()) {
- DCHECK(var->IsContextSlot());
- context_locals->Add(var, zone());
- }
- }
-
// Collect temporaries which are always allocated on the stack, unless the
// context as a whole has forced context allocation.
for (int i = 0; i < temps_.length(); i++) {
@@ -635,9 +620,10 @@ void Scope::CollectStackAndContextLocals(
if (var->IsContextSlot()) {
DCHECK(has_forced_context_allocation());
context_locals->Add(var, zone());
- } else {
- DCHECK(var->IsStackLocal());
+ } else if (var->IsStackLocal()) {
stack_locals->Add(var, zone());
+ } else {
+ DCHECK(var->IsParameter());
}
}
}
@@ -707,6 +693,7 @@ bool Scope::HasTrivialContext() const {
if (scope->is_eval_scope()) return false;
if (scope->scope_inside_with_) return false;
if (scope->ContextLocalCount() > 0) return false;
+ if (scope->ContextGlobalCount() > 0) return false;
}
return true;
}
@@ -754,24 +741,35 @@ int Scope::ContextChainLength(Scope* scope) {
}
-Scope* Scope::ScriptScope() {
+Scope* Scope::DeclarationScope() {
Scope* scope = this;
- while (!scope->is_script_scope()) {
+ while (!scope->is_declaration_scope()) {
scope = scope->outer_scope();
}
return scope;
}
-Scope* Scope::DeclarationScope() {
+Scope* Scope::ClosureScope() {
Scope* scope = this;
- while (!scope->is_declaration_scope()) {
+ while (!scope->is_declaration_scope() || scope->is_block_scope()) {
+ scope = scope->outer_scope();
+ }
+ return scope;
+}
+
+
+Scope* Scope::ReceiverScope() {
+ Scope* scope = this;
+ while (!scope->is_script_scope() &&
+ (!scope->is_function_scope() || scope->is_arrow_scope())) {
scope = scope->outer_scope();
}
return scope;
}
+
Handle<ScopeInfo> Scope::GetScopeInfo(Isolate* isolate) {
if (scope_info_.is_null()) {
scope_info_ = ScopeInfo::Create(isolate, zone(), this);
@@ -866,7 +864,10 @@ static void PrintVar(int indent, Variable* var) {
if (var->is_used() || !var->IsUnallocated()) {
Indent(indent, Variable::Mode2String(var->mode()));
PrintF(" ");
- PrintName(var->raw_name());
+ if (var->raw_name()->IsEmpty())
+ PrintF(".%p", reinterpret_cast<void*>(var));
+ else
+ PrintName(var->raw_name());
PrintF("; // ");
PrintLocation(var);
bool comma = !var->IsUnallocated();
@@ -912,7 +913,11 @@ void Scope::Print(int n) {
PrintF(" (");
for (int i = 0; i < params_.length(); i++) {
if (i > 0) PrintF(", ");
- PrintName(params_[i]->raw_name());
+ const AstRawString* name = params_[i]->raw_name();
+ if (name->IsEmpty())
+ PrintF(".%p", reinterpret_cast<void*>(params_[i]));
+ else
+ PrintName(name);
}
PrintF(")");
}
@@ -971,13 +976,6 @@ void Scope::Print(int n) {
}
}
- if (internals_.length() > 0) {
- Indent(n1, "// internal vars:\n");
- for (int i = 0; i < internals_.length(); i++) {
- PrintVar(n1, internals_[i]);
- }
- }
-
if (variables_.Start() != NULL) {
Indent(n1, "// local vars:\n");
PrintMap(n1, &variables_);
@@ -1343,7 +1341,6 @@ bool Scope::MustAllocateInContext(Variable* var) {
// always context-allocated.
if (has_forced_context_allocation()) return true;
if (var->mode() == TEMPORARY) return false;
- if (var->mode() == INTERNAL) return true;
if (is_catch_scope() || is_module_scope()) return true;
if (is_script_scope() && IsLexicalVariableMode(var->mode())) return true;
return var->has_forced_context_allocation() ||
@@ -1366,7 +1363,7 @@ bool Scope::HasArgumentsParameter(Isolate* isolate) {
void Scope::AllocateStackSlot(Variable* var) {
if (is_block_scope()) {
- DeclarationScope()->AllocateStackSlot(var);
+ outer_scope()->DeclarationScope()->AllocateStackSlot(var);
} else {
var->AllocateTo(VariableLocation::LOCAL, num_stack_slots_++);
}
@@ -1404,7 +1401,9 @@ void Scope::AllocateParameterLocals(Isolate* isolate) {
// In strict mode 'arguments' does not alias formal parameters.
// Therefore in strict mode we allocate parameters as if 'arguments'
// were not used.
- uses_sloppy_arguments = is_sloppy(language_mode());
+ // If the parameter list is not simple, arguments isn't sloppy either.
+ uses_sloppy_arguments =
+ is_sloppy(language_mode()) && has_simple_parameters();
}
if (rest_parameter_ && !MustAllocate(rest_parameter_)) {
@@ -1478,14 +1477,16 @@ void Scope::AllocateDeclaredGlobal(Isolate* isolate, Variable* var) {
DCHECK(var->scope() == this);
DCHECK(!var->IsVariable(isolate->factory()->dot_result_string()) ||
!var->IsStackLocal());
- if (var->IsUnallocated() && var->IsStaticGlobalObjectProperty()) {
- DCHECK_EQ(-1, var->index());
- DCHECK(var->name()->IsString());
- var->AllocateTo(VariableLocation::GLOBAL, num_heap_slots_);
- num_global_slots_++;
- // Each global variable occupies two slots in the context: for reads
- // and writes.
- num_heap_slots_ += 2;
+ if (var->IsUnallocated()) {
+ if (var->IsStaticGlobalObjectProperty()) {
+ DCHECK_EQ(-1, var->index());
+ DCHECK(var->name()->IsString());
+ var->AllocateTo(VariableLocation::GLOBAL, num_heap_slots_++);
+ num_global_slots_++;
+ } else {
+ // There must be only DYNAMIC_GLOBAL in the script scope.
+ DCHECK(!is_script_scope() || DYNAMIC_GLOBAL == var->mode());
+ }
}
}
@@ -1496,10 +1497,6 @@ void Scope::AllocateNonParameterLocalsAndDeclaredGlobals(Isolate* isolate) {
AllocateNonParameterLocal(isolate, temps_[i]);
}
- for (int i = 0; i < internals_.length(); i++) {
- AllocateNonParameterLocal(isolate, internals_[i]);
- }
-
ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
@@ -1513,7 +1510,7 @@ void Scope::AllocateNonParameterLocalsAndDeclaredGlobals(Isolate* isolate) {
AllocateNonParameterLocal(isolate, vars[i].var());
}
- if (enable_context_globals) {
+ if (FLAG_global_var_shortcuts) {
for (int i = 0; i < var_count; i++) {
AllocateDeclaredGlobal(isolate, vars[i].var());
}
@@ -1593,7 +1590,8 @@ void Scope::AllocateModules() {
DCHECK(!scope->already_resolved());
DCHECK(scope->module_descriptor_->IsFrozen());
DCHECK_NULL(scope->module_var_);
- scope->module_var_ = NewInternal(ast_value_factory_->dot_module_string());
+ scope->module_var_ =
+ NewTemporary(ast_value_factory_->dot_module_string());
++num_modules_;
}
}
@@ -1610,11 +1608,12 @@ int Scope::ContextLocalCount() const {
if (num_heap_slots() == 0) return 0;
bool is_function_var_in_context =
function_ != NULL && function_->proxy()->var()->IsContextSlot();
- return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
- 2 * num_global_slots() - (is_function_var_in_context ? 1 : 0);
+ return num_heap_slots() - Context::MIN_CONTEXT_SLOTS - num_global_slots() -
+ (is_function_var_in_context ? 1 : 0);
}
int Scope::ContextGlobalCount() const { return num_global_slots(); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 40863800e4..2f600b9f0c 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -119,6 +119,9 @@ class Scope: public ZoneObject {
// outer scope. Only possible for function scopes; at most one variable.
void DeclareFunctionVar(VariableDeclaration* declaration) {
DCHECK(is_function_scope());
+ // Handle implicit declaration of the function name in named function
+ // expressions before other declarations.
+ decls_.InsertAt(0, declaration, zone());
function_ = declaration;
}
@@ -165,16 +168,11 @@ class Scope: public ZoneObject {
// such a variable again if it was added; otherwise this is a no-op.
void RemoveUnresolved(VariableProxy* var);
- // Creates a new internal variable in this scope. The name is only used
- // for printing and cannot be used to find the variable. In particular,
- // the only way to get hold of the temporary is by keeping the Variable*
- // around.
- Variable* NewInternal(const AstRawString* name);
-
- // Creates a new temporary variable in this scope. The name is only used
- // for printing and cannot be used to find the variable. In particular,
- // the only way to get hold of the temporary is by keeping the Variable*
- // around. The name should not clash with a legitimate variable names.
+ // Creates a new temporary variable in this scope's TemporaryScope. The
+ // name is only used for printing and cannot be used to find the variable.
+ // In particular, the only way to get hold of the temporary is by keeping the
+ // Variable* around. The name should not clash with a legitimate variable
+ // names.
Variable* NewTemporary(const AstRawString* name);
// Adds the specific declaration node to the list of declarations in
@@ -280,13 +278,9 @@ class Scope: public ZoneObject {
bool is_block_scope() const { return scope_type_ == BLOCK_SCOPE; }
bool is_with_scope() const { return scope_type_ == WITH_SCOPE; }
bool is_arrow_scope() const { return scope_type_ == ARROW_SCOPE; }
- bool is_declaration_scope() const {
- return is_eval_scope() || is_function_scope() ||
- is_module_scope() || is_script_scope();
- }
- bool is_strict_eval_scope() const {
- return is_eval_scope() && is_strict(language_mode_);
- }
+ bool is_declaration_scope() const { return is_declaration_scope_; }
+
+ void set_is_declaration_scope() { is_declaration_scope_ = true; }
// Information about which scopes calls eval.
bool calls_eval() const { return scope_calls_eval_; }
@@ -392,10 +386,9 @@ class Scope: public ZoneObject {
return rest_index_ >= 0;
}
- bool is_simple_parameter_list() const {
+ bool has_simple_parameters() const {
DCHECK(is_function_scope());
- if (rest_index_ >= 0) return false;
- return true;
+ return has_simple_parameters_;
}
// The local variable 'arguments' if we need to allocate it; NULL otherwise.
@@ -481,15 +474,21 @@ class Scope: public ZoneObject {
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
- // Find the script scope.
- // Used in modules implemenetation to find hosting scope.
- // TODO(rossberg): is this needed?
- Scope* ScriptScope();
-
- // Find the first function, global, or eval scope. This is the scope
- // where var declarations will be hoisted to in the implementation.
+ // Find the first function, script, eval or (declaration) block scope. This is
+ // the scope where var declarations will be hoisted to in the implementation.
Scope* DeclarationScope();
+ // Find the first non-block declaration scope. This should be either a script,
+ // function, or eval scope. Same as DeclarationScope(), but skips
+ // declaration "block" scopes. Used for differentiating associated
+ // function objects (i.e., the scope for which a function prologue allocates
+ // a context) or declaring temporaries.
+ Scope* ClosureScope();
+
+ // Find the first (non-arrow) function or script scope. This is where
+ // 'this' is bound, and what determines the function kind.
+ Scope* ReceiverScope();
+
Handle<ScopeInfo> GetScopeInfo(Isolate* isolate);
// Get the chain of nested scopes within this scope for the source statement
@@ -552,8 +551,6 @@ class Scope: public ZoneObject {
// variables may be implicitly 'declared' by being used (possibly in
// an inner scope) with no intervening with statements or eval calls.
VariableMap variables_;
- // Compiler-allocated (user-invisible) internals.
- ZoneList<Variable*> internals_;
// Compiler-allocated (user-invisible) temporaries.
ZoneList<Variable*> temps_;
// Parameter list in source order.
@@ -614,6 +611,9 @@ class Scope: public ZoneObject {
// constructed based on a serialized scope info or a catch context).
bool already_resolved_;
+ // True if it holds 'var' declarations.
+ bool is_declaration_scope_;
+
// Computed as variables are declared.
int num_var_or_const_;
@@ -625,10 +625,11 @@ class Scope: public ZoneObject {
// The number of modules (including nested ones).
int num_modules_;
- // For module scopes, the host scope's internal variable binding this module.
+ // For module scopes, the host scope's temporary variable binding this module.
Variable* module_var_;
- // Rest parameter
+ // Info about the parameter list of a function.
+ bool has_simple_parameters_;
Variable* rest_parameter_;
int rest_index_;
diff --git a/deps/v8/src/snapshot/OWNERS b/deps/v8/src/snapshot/OWNERS
index 003bdad6bc..6c84c07df7 100644
--- a/deps/v8/src/snapshot/OWNERS
+++ b/deps/v8/src/snapshot/OWNERS
@@ -1,2 +1,5 @@
+set noparent
+
verwaest@chromium.org
+vogelheim@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/src/snapshot/natives-common.cc b/deps/v8/src/snapshot/natives-common.cc
new file mode 100644
index 0000000000..080cd49104
--- /dev/null
+++ b/deps/v8/src/snapshot/natives-common.cc
@@ -0,0 +1,56 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The common functionality when building with internal or external natives.
+
+#include "src/heap/heap.h"
+#include "src/objects-inl.h"
+#include "src/snapshot/natives.h"
+
+namespace v8 {
+namespace internal {
+
+template <>
+FixedArray* NativesCollection<CORE>::GetSourceCache(Heap* heap) {
+ return heap->natives_source_cache();
+}
+
+
+template <>
+FixedArray* NativesCollection<EXPERIMENTAL>::GetSourceCache(Heap* heap) {
+ return heap->experimental_natives_source_cache();
+}
+
+
+template <>
+FixedArray* NativesCollection<EXTRAS>::GetSourceCache(Heap* heap) {
+ return heap->extra_natives_source_cache();
+}
+
+
+template <>
+FixedArray* NativesCollection<CODE_STUB>::GetSourceCache(Heap* heap) {
+ return heap->code_stub_natives_source_cache();
+}
+
+
+template <NativeType type>
+void NativesCollection<type>::UpdateSourceCache(Heap* heap) {
+ for (int i = 0; i < GetBuiltinsCount(); i++) {
+ Object* source = GetSourceCache(heap)->get(i);
+ if (!source->IsUndefined()) {
+ ExternalOneByteString::cast(source)->update_data_cache();
+ }
+ }
+}
+
+
+// Explicit template instantiations.
+template void NativesCollection<CORE>::UpdateSourceCache(Heap* heap);
+template void NativesCollection<CODE_STUB>::UpdateSourceCache(Heap* heap);
+template void NativesCollection<EXPERIMENTAL>::UpdateSourceCache(Heap* heap);
+template void NativesCollection<EXTRAS>::UpdateSourceCache(Heap* heap);
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/natives-external.cc b/deps/v8/src/snapshot/natives-external.cc
index 3649c2a69f..e0df27f3ad 100644
--- a/deps/v8/src/snapshot/natives-external.cc
+++ b/deps/v8/src/snapshot/natives-external.cc
@@ -157,6 +157,7 @@ void ReadNatives() {
if (natives_blob_ && NativesHolder<CORE>::empty()) {
SnapshotByteSource bytes(natives_blob_->data, natives_blob_->raw_size);
NativesHolder<CORE>::set(NativesStore::MakeFromScriptsSource(&bytes));
+ NativesHolder<CODE_STUB>::set(NativesStore::MakeFromScriptsSource(&bytes));
NativesHolder<EXPERIMENTAL>::set(
NativesStore::MakeFromScriptsSource(&bytes));
NativesHolder<EXTRAS>::set(NativesStore::MakeFromScriptsSource(&bytes));
@@ -185,6 +186,7 @@ void SetNativesFromFile(StartupData* natives_blob) {
*/
void DisposeNatives() {
NativesHolder<CORE>::Dispose();
+ NativesHolder<CODE_STUB>::Dispose();
NativesHolder<EXPERIMENTAL>::Dispose();
NativesHolder<EXTRAS>::Dispose();
}
@@ -227,10 +229,19 @@ Vector<const char> NativesCollection<type>::GetScriptsSource() {
}
-// The compiler can't 'see' all uses of the static methods and hence
-// my choice to elide them. This we'll explicitly instantiate these.
-template class NativesCollection<CORE>;
-template class NativesCollection<EXPERIMENTAL>;
-template class NativesCollection<EXTRAS>;
+// Explicit template instantiations.
+#define INSTANTIATE_TEMPLATES(T) \
+ template int NativesCollection<T>::GetBuiltinsCount(); \
+ template int NativesCollection<T>::GetDebuggerCount(); \
+ template int NativesCollection<T>::GetIndex(const char* name); \
+ template Vector<const char> NativesCollection<T>::GetScriptSource(int i); \
+ template Vector<const char> NativesCollection<T>::GetScriptName(int i); \
+ template Vector<const char> NativesCollection<T>::GetScriptsSource();
+INSTANTIATE_TEMPLATES(CORE)
+INSTANTIATE_TEMPLATES(CODE_STUB)
+INSTANTIATE_TEMPLATES(EXPERIMENTAL)
+INSTANTIATE_TEMPLATES(EXTRAS)
+#undef INSTANTIATE_TEMPLATES
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/natives.h b/deps/v8/src/snapshot/natives.h
index 149f9ec2be..1efaf7ece5 100644
--- a/deps/v8/src/snapshot/natives.h
+++ b/deps/v8/src/snapshot/natives.h
@@ -5,6 +5,7 @@
#ifndef V8_SNAPSHOT_NATIVES_H_
#define V8_SNAPSHOT_NATIVES_H_
+#include "src/objects.h"
#include "src/vector.h"
namespace v8 { class StartupData; } // Forward declaration.
@@ -12,11 +13,13 @@ namespace v8 { class StartupData; } // Forward declaration.
namespace v8 {
namespace internal {
-enum NativeType { CORE, EXPERIMENTAL, EXTRAS, D8, TEST };
+enum NativeType { CORE, CODE_STUB, EXPERIMENTAL, EXTRAS, D8, TEST };
template <NativeType type>
class NativesCollection {
public:
+ // The following methods are implemented in js2c-generated code:
+
// Number of built-in scripts.
static int GetBuiltinsCount();
// Number of debugger implementation scripts.
@@ -30,12 +33,19 @@ class NativesCollection {
static Vector<const char> GetScriptSource(int index);
static Vector<const char> GetScriptName(int index);
static Vector<const char> GetScriptsSource();
+
+ // The following methods are implemented in natives-common.cc:
+
+ static FixedArray* GetSourceCache(Heap* heap);
+ static void UpdateSourceCache(Heap* heap);
};
typedef NativesCollection<CORE> Natives;
+typedef NativesCollection<CODE_STUB> CodeStubNatives;
typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
typedef NativesCollection<EXTRAS> ExtraNatives;
+
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
// Used for reading the natives at runtime. Implementation in natives-empty.cc
void SetNativesFromFile(StartupData* natives_blob);
diff --git a/deps/v8/src/snapshot/serialize.cc b/deps/v8/src/snapshot/serialize.cc
index 7588fbcf90..9f2b4e9314 100644
--- a/deps/v8/src/snapshot/serialize.cc
+++ b/deps/v8/src/snapshot/serialize.cc
@@ -60,7 +60,6 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
"Heap::NewSpaceAllocationLimitAddress()");
Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
"Heap::NewSpaceAllocationTopAddress()");
- Add(ExternalReference::debug_break(isolate).address(), "Debug::Break()");
Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
"Debug::step_in_fp_addr()");
Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
@@ -221,20 +220,6 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
Add(ref.address(), runtime_functions[i].name);
}
- static const RefTableEntry inline_caches[] = {
-#define IC_ENTRY(name) \
- { IC::k##name, "IC::" #name } \
- ,
- IC_UTIL_LIST(IC_ENTRY)
-#undef IC_ENTRY
- };
-
- for (unsigned i = 0; i < arraysize(inline_caches); ++i) {
- ExternalReference ref(
- IC_Utility(static_cast<IC::UtilityId>(inline_caches[i].id)), isolate);
- Add(ref.address(), runtime_functions[i].name);
- }
-
// Stat counters
struct StatsRefTableEntry {
StatsCounter* (Counters::*counter)();
@@ -515,16 +500,19 @@ void Deserializer::DecodeReservation(
}
-void Deserializer::FlushICacheForNewCodeObjects() {
- if (!deserializing_user_code_) {
- // The entire isolate is newly deserialized. Simply flush all code pages.
- PageIterator it(isolate_->heap()->code_space());
- while (it.has_next()) {
- Page* p = it.next();
- CpuFeatures::FlushICache(p->area_start(),
- p->area_end() - p->area_start());
- }
+void Deserializer::FlushICacheForNewIsolate() {
+ DCHECK(!deserializing_user_code_);
+ // The entire isolate is newly deserialized. Simply flush all code pages.
+ PageIterator it(isolate_->heap()->code_space());
+ while (it.has_next()) {
+ Page* p = it.next();
+ CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start());
}
+}
+
+
+void Deserializer::FlushICacheForNewCodeObjects() {
+ DCHECK(deserializing_user_code_);
for (Code* code : new_code_objects_) {
CpuFeatures::FlushICache(code->instruction_start(),
code->instruction_size());
@@ -572,10 +560,11 @@ void Deserializer::Deserialize(Isolate* isolate) {
isolate_->heap()->RepairFreeListsAfterDeserialization();
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
DeserializeDeferredObjects();
+ FlushICacheForNewIsolate();
}
isolate_->heap()->set_native_contexts_list(
- isolate_->heap()->undefined_value());
+ isolate_->heap()->code_stub_context());
// The allocation site list is build during root iteration, but if no sites
// were encountered then it needs to be initialized to undefined.
@@ -585,14 +574,9 @@ void Deserializer::Deserialize(Isolate* isolate) {
}
// Update data pointers to the external strings containing natives sources.
- for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Object* source = isolate_->heap()->natives_source_cache()->get(i);
- if (!source->IsUndefined()) {
- ExternalOneByteString::cast(source)->update_data_cache();
- }
- }
-
- FlushICacheForNewCodeObjects();
+ Natives::UpdateSourceCache(isolate_->heap());
+ ExtraNatives::UpdateSourceCache(isolate_->heap());
+ CodeStubNatives::UpdateSourceCache(isolate_->heap());
// Issue code events for newly deserialized code objects.
LOG_CODE_EVENT(isolate_, LogCodeObjects());
@@ -649,6 +633,7 @@ MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
Object* root;
VisitPointer(&root);
DeserializeDeferredObjects();
+ FlushICacheForNewCodeObjects();
result = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
}
CommitNewInternalizedStrings(isolate);
@@ -907,6 +892,17 @@ Address Deserializer::Allocate(int space_index, int size) {
}
+Object** Deserializer::CopyInNativesSource(Vector<const char> source_vector,
+ Object** current) {
+ DCHECK(!isolate_->heap()->deserialization_complete());
+ NativesExternalStringResource* resource = new NativesExternalStringResource(
+ source_vector.start(), source_vector.length());
+ Object* resource_obj = reinterpret_cast<Object*>(resource);
+ UnalignedCopy(current++, &resource_obj);
+ return current;
+}
+
+
bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
Address current_object_address) {
Isolate* const isolate = isolate_;
@@ -1173,17 +1169,20 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
CHECK(false);
break;
- case kNativesStringResource: {
- DCHECK(!isolate_->heap()->deserialization_complete());
- int index = source_.Get();
- Vector<const char> source_vector = Natives::GetScriptSource(index);
- NativesExternalStringResource* resource =
- new NativesExternalStringResource(source_vector.start(),
- source_vector.length());
- Object* resource_obj = reinterpret_cast<Object*>(resource);
- UnalignedCopy(current++, &resource_obj);
+ case kNativesStringResource:
+ current = CopyInNativesSource(Natives::GetScriptSource(source_.Get()),
+ current);
+ break;
+
+ case kExtraNativesStringResource:
+ current = CopyInNativesSource(
+ ExtraNatives::GetScriptSource(source_.Get()), current);
+ break;
+
+ case kCodeStubNativesStringResource:
+ current = CopyInNativesSource(
+ CodeStubNatives::GetScriptSource(source_.Get()), current);
break;
- }
// Deserialize raw data of variable length.
case kVariableRawData: {
@@ -1419,6 +1418,17 @@ void PartialSerializer::Serialize(Object** o) {
Context* context = Context::cast(*o);
global_object_ = context->global_object();
back_reference_map()->AddGlobalProxy(context->global_proxy());
+ // The bootstrap snapshot has a code-stub context. When serializing the
+ // partial snapshot, it is chained into the weak context list on the isolate
+ // and its next context pointer may point to the code-stub context. Clear
+ // it before serializing, it will get re-added to the context list
+ // explicitly when it's loaded.
+ if (context->IsNativeContext()) {
+ context->set(Context::NEXT_CONTEXT_LINK,
+ isolate_->heap()->undefined_value());
+ DCHECK(!context->global_object()->IsUndefined());
+ DCHECK(!context->builtins()->IsUndefined());
+ }
}
VisitPointer(o);
SerializeDeferredObjects();
@@ -1623,7 +1633,10 @@ bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
- DCHECK(!obj->IsJSFunction());
+ // Make sure that all functions are derived from the code-stub context
+ DCHECK(!obj->IsJSFunction() ||
+ JSFunction::cast(obj)->GetCreationContext() ==
+ isolate()->heap()->code_stub_context());
int root_index = root_index_map_.Lookup(obj);
// We can only encode roots as such if it has already been serialized.
@@ -1908,38 +1921,10 @@ void Serializer::ObjectSerializer::Serialize() {
// We don't expect fillers.
DCHECK(!object_->IsFiller());
- if (object_->IsPrototypeInfo()) {
- Object* prototype_users = PrototypeInfo::cast(object_)->prototype_users();
- if (prototype_users->IsWeakFixedArray()) {
- WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
- array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
- }
- }
- // Compaction of a prototype users list can require the registered users
- // to update their remembered slots. That doesn't work if those users
- // have already been serialized themselves. So if this object is a
- // registered user, compact its prototype's user list now.
- if (object_->IsMap()) {
- Map* map = Map::cast(object_);
- if (map->is_prototype_map() && map->prototype_info()->IsPrototypeInfo() &&
- PrototypeInfo::cast(map->prototype_info())->registry_slot() !=
- PrototypeInfo::UNREGISTERED) {
- JSObject* proto = JSObject::cast(map->prototype());
- PrototypeInfo* info = PrototypeInfo::cast(proto->map()->prototype_info());
- WeakFixedArray* array = WeakFixedArray::cast(info->prototype_users());
- array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
- }
- }
-
if (object_->IsScript()) {
// Clear cached line ends.
Object* undefined = serializer_->isolate()->heap()->undefined_value();
Script::cast(object_)->set_line_ends(undefined);
- Object* shared_list = Script::cast(object_)->shared_function_infos();
- if (shared_list->IsWeakFixedArray()) {
- WeakFixedArray::cast(shared_list)
- ->Compact<WeakFixedArray::NullCallback>();
- }
}
if (object_->IsExternalString()) {
@@ -2144,25 +2129,50 @@ void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
}
-void Serializer::ObjectSerializer::VisitExternalOneByteString(
- v8::String::ExternalOneByteStringResource** resource_pointer) {
- Address references_start = reinterpret_cast<Address>(resource_pointer);
- OutputRawData(references_start);
- for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Object* source =
- serializer_->isolate()->heap()->natives_source_cache()->get(i);
+bool Serializer::ObjectSerializer::SerializeExternalNativeSourceString(
+ int builtin_count,
+ v8::String::ExternalOneByteStringResource** resource_pointer,
+ FixedArray* source_cache, int resource_index) {
+ for (int i = 0; i < builtin_count; i++) {
+ Object* source = source_cache->get(i);
if (!source->IsUndefined()) {
ExternalOneByteString* string = ExternalOneByteString::cast(source);
typedef v8::String::ExternalOneByteStringResource Resource;
const Resource* resource = string->resource();
if (resource == *resource_pointer) {
- sink_->Put(kNativesStringResource, "NativesStringResource");
+ sink_->Put(resource_index, "NativesStringResource");
sink_->PutSection(i, "NativesStringResourceEnd");
bytes_processed_so_far_ += sizeof(resource);
- return;
+ return true;
}
}
}
+ return false;
+}
+
+
+void Serializer::ObjectSerializer::VisitExternalOneByteString(
+ v8::String::ExternalOneByteStringResource** resource_pointer) {
+ Address references_start = reinterpret_cast<Address>(resource_pointer);
+ OutputRawData(references_start);
+ if (SerializeExternalNativeSourceString(
+ Natives::GetBuiltinsCount(), resource_pointer,
+ Natives::GetSourceCache(serializer_->isolate()->heap()),
+ kNativesStringResource)) {
+ return;
+ }
+ if (SerializeExternalNativeSourceString(
+ ExtraNatives::GetBuiltinsCount(), resource_pointer,
+ ExtraNatives::GetSourceCache(serializer_->isolate()->heap()),
+ kExtraNativesStringResource)) {
+ return;
+ }
+ if (SerializeExternalNativeSourceString(
+ CodeStubNatives::GetBuiltinsCount(), resource_pointer,
+ CodeStubNatives::GetSourceCache(serializer_->isolate()->heap()),
+ kCodeStubNativesStringResource)) {
+ return;
+ }
// One of the strings in the natives cache should match the resource. We
// don't expect any other kinds of external strings here.
UNREACHABLE();
@@ -2494,7 +2504,7 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
HandleScope scope(isolate);
- SmartPointer<SerializedCodeData> scd(
+ base::SmartPointer<SerializedCodeData> scd(
SerializedCodeData::FromCachedData(isolate, cached_data, *source));
if (scd.is_empty()) {
if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
@@ -2522,7 +2532,6 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
return MaybeHandle<SharedFunctionInfo>();
}
- deserializer.FlushICacheForNewCodeObjects();
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
diff --git a/deps/v8/src/snapshot/serialize.h b/deps/v8/src/snapshot/serialize.h
index 001d775392..e790062913 100644
--- a/deps/v8/src/snapshot/serialize.h
+++ b/deps/v8/src/snapshot/serialize.h
@@ -6,7 +6,6 @@
#define V8_SNAPSHOT_SERIALIZE_H_
#include "src/hashmap.h"
-#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/snapshot/snapshot-source-sink.h"
@@ -156,6 +155,8 @@ class BackReference {
ChunkOffsetBits::encode(index));
}
+ static BackReference DummyReference() { return BackReference(kDummyValue); }
+
static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
uint32_t chunk_offset) {
DCHECK(IsAligned(chunk_offset, kObjectAlignment));
@@ -201,6 +202,7 @@ class BackReference {
static const uint32_t kInvalidValue = 0xFFFFFFFF;
static const uint32_t kSourceValue = 0xFFFFFFFE;
static const uint32_t kGlobalProxyValue = 0xFFFFFFFD;
+ static const uint32_t kDummyValue = 0xFFFFFFFC;
static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
@@ -381,23 +383,29 @@ class SerializerDeserializer: public ObjectVisitor {
static const int kNextChunk = 0x3e;
// Deferring object content.
static const int kDeferred = 0x3f;
+ // Used for the source code of the natives, which is in the executable, but
+ // is referred to from external strings in the snapshot.
+ static const int kNativesStringResource = 0x5d;
+ // Used for the source code for compiled stubs, which is in the executable,
+ // but is referred to from external strings in the snapshot.
+ static const int kCodeStubNativesStringResource = 0x5e;
+ // Used for the source code for V8 extras, which is in the executable,
+ // but is referred to from external strings in the snapshot.
+ static const int kExtraNativesStringResource = 0x5f;
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
// Examine the build process for architecture, version or configuration
// mismatches.
static const int kSynchronize = 0x17;
- // Used for the source code of the natives, which is in the executable, but
- // is referred to from external strings in the snapshot.
- static const int kNativesStringResource = 0x37;
+ // Repeats of variable length.
+ static const int kVariableRepeat = 0x37;
// Raw data of variable length.
static const int kVariableRawData = 0x57;
- // Repeats of variable length.
- static const int kVariableRepeat = 0x77;
// Alignment prefixes 0x7d..0x7f
static const int kAlignmentPrefix = 0x7d;
- // 0x5d..0x5f unused
+ // 0x77 unused
// ---------- byte code range 0x80..0xff ----------
// First 32 root array items.
@@ -539,8 +547,6 @@ class Deserializer: public SerializerDeserializer {
// Deserialize a shared function info. Fail gracefully.
MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);
- void FlushICacheForNewCodeObjects();
-
// Pass a vector of externally-provided objects referenced by the snapshot.
// The ownership to its backing store is handed over as well.
void SetAttachedObjects(Vector<Handle<Object> > attached_objects) {
@@ -568,6 +574,9 @@ class Deserializer: public SerializerDeserializer {
void DeserializeDeferredObjects();
+ void FlushICacheForNewIsolate();
+ void FlushICacheForNewCodeObjects();
+
void CommitNewInternalizedStrings(Isolate* isolate);
// Fills in some heap data in an area from start to end (non-inclusive). The
@@ -587,6 +596,9 @@ class Deserializer: public SerializerDeserializer {
// snapshot by chunk index and offset.
HeapObject* GetBackReferencedObject(int space);
+ Object** CopyInNativesSource(Vector<const char> source_vector,
+ Object** current);
+
// Cached current isolate.
Isolate* isolate_;
@@ -675,6 +687,11 @@ class Serializer : public SerializerDeserializer {
private:
void SerializePrologue(AllocationSpace space, int size, Map* map);
+ bool SerializeExternalNativeSourceString(
+ int builtin_count,
+ v8::String::ExternalOneByteStringResource** resource_pointer,
+ FixedArray* source_cache, int resource_index);
+
enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
// This function outputs or skips the raw data between the last pointer and
// up to the current position. It optionally can just return the number of
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index ab8a88486e..743178b51b 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -8,7 +8,7 @@
#include "src/api.h"
#include "src/base/platform/platform.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -137,7 +137,7 @@ void CalculateFirstPageSizes(bool is_default_snapshot,
2 * context_reservations[context_index].chunk_size()) +
Page::kObjectStartOffset;
// Add a small allowance to the code space for small scripts.
- if (space == CODE_SPACE) required += 64 * KB;
+ if (space == CODE_SPACE) required += 32 * KB;
} else {
// We expect the vanilla snapshot to only require on page per space.
DCHECK(!is_default_snapshot);
diff --git a/deps/v8/src/string-builder.cc b/deps/v8/src/string-builder.cc
index 7c46e0d523..30c64b3c6d 100644
--- a/deps/v8/src/string-builder.cc
+++ b/deps/v8/src/string-builder.cc
@@ -4,6 +4,8 @@
#include "src/string-builder.h"
+#include "src/objects-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/string-builder.h b/deps/v8/src/string-builder.h
index 5314665329..554277dab1 100644
--- a/deps/v8/src/string-builder.h
+++ b/deps/v8/src/string-builder.h
@@ -5,7 +5,12 @@
#ifndef V8_STRING_BUILDER_H_
#define V8_STRING_BUILDER_H_
-#include "src/v8.h"
+#include "src/assert-scope.h"
+#include "src/factory.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/string-iterator.js b/deps/v8/src/string-iterator.js
index 536430ee8f..bb392ef10e 100644
--- a/deps/v8/src/string-iterator.js
+++ b/deps/v8/src/string-iterator.js
@@ -41,7 +41,7 @@ function CreateStringIterator(string) {
// 21.1.5.2.1 %StringIteratorPrototype%.next( )
function StringIteratorNext() {
- var iterator = $toObject(this);
+ var iterator = TO_OBJECT(this);
if (!HAS_DEFINED_PRIVATE(iterator, stringIteratorNextIndexSymbol)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
diff --git a/deps/v8/src/string-search.cc b/deps/v8/src/string-search.cc
deleted file mode 100644
index 837f938095..0000000000
--- a/deps/v8/src/string-search.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/string-search.h"
-
-namespace v8 {
-namespace internal {
-
-// Storage for constants used by string-search.
-
-// Now in Isolate:
-// bad_char_shift_table()
-// good_suffix_shift_table()
-// suffix_table()
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 5f61e0da1d..92fff27683 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -250,11 +250,11 @@ void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
}
-SmartArrayPointer<const char> StringStream::ToCString() const {
+base::SmartArrayPointer<const char> StringStream::ToCString() const {
char* str = NewArray<char>(length_ + 1);
MemCopy(str, buffer_, length_);
str[length_] = '\0';
- return SmartArrayPointer<const char>(str);
+ return base::SmartArrayPointer<const char>(str);
}
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index cc50bb7150..d03f1b04c9 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -5,11 +5,17 @@
#ifndef V8_STRING_STREAM_H_
#define V8_STRING_STREAM_H_
+#include "src/allocation.h"
+#include "src/base/smart-pointers.h"
#include "src/handles.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
+// Forward declarations.
+class ByteArray;
+
class StringAllocator {
public:
virtual ~StringAllocator() { }
@@ -129,7 +135,7 @@ class StringStream final {
void OutputToStdOut() { OutputToFile(stdout); }
void Log(Isolate* isolate);
Handle<String> ToString(Isolate* isolate);
- SmartArrayPointer<const char> ToCString() const;
+ base::SmartArrayPointer<const char> ToCString() const;
int length() const { return length_; }
// Object printing support.
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 3ddd6d26ce..8e7fc6c01c 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -9,18 +9,19 @@
// -------------------------------------------------------------------
// Imports
+var ArrayIndexOf;
+var ArrayJoin;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
-
-var ArrayIndexOf;
-var ArrayJoin;
var MathMax;
var MathMin;
var RegExpExec;
var RegExpExecNoTests;
var RegExpLastMatchInfo;
+var ToNumber;
+var ToString;
utils.Import(function(from) {
ArrayIndexOf = from.ArrayIndexOf;
@@ -30,6 +31,8 @@ utils.Import(function(from) {
RegExpExec = from.RegExpExec;
RegExpExecNoTests = from.RegExpExecNoTests;
RegExpLastMatchInfo = from.RegExpLastMatchInfo;
+ ToNumber = from.ToNumber;
+ ToString = from.ToString;
});
//-------------------------------------------------------------------
@@ -132,7 +135,7 @@ function StringLastIndexOfJS(pat /* position */) { // length == 1
var patLength = pat.length;
var index = subLength - patLength;
if (%_ArgumentsLength() > 1) {
- var position = $toNumber(%_Arguments(1));
+ var position = ToNumber(%_Arguments(1));
if (!NUMBER_IS_NAN(position)) {
position = TO_INTEGER(position);
if (position < 0) {
@@ -189,10 +192,12 @@ function StringMatchJS(regexp) {
// For now we do nothing, as proper normalization requires big tables.
// If Intl is enabled, then i18n.js will override it and provide the the
// proper functionality.
-function StringNormalizeJS(form) {
+function StringNormalizeJS() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
+ var s = TO_STRING_INLINE(this);
- var form = form ? TO_STRING_INLINE(form) : 'NFC';
+ var formArg = %_Arguments(0);
+ var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING_INLINE(formArg);
var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
var normalizationForm =
@@ -202,7 +207,7 @@ function StringNormalizeJS(form) {
%_CallFunction(NORMALIZATION_FORMS, ', ', ArrayJoin));
}
- return %_ValueOf(this);
+ return s;
}
@@ -546,9 +551,7 @@ function StringSearch(re) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
var regexp;
- if (IS_STRING(re)) {
- regexp = %_GetFromCache(STRING_TO_REGEXP_CACHE_ID, re);
- } else if (IS_REGEXP(re)) {
+ if (IS_REGEXP(re)) {
regexp = re;
} else {
regexp = new GlobalRegExp(re);
@@ -823,7 +826,7 @@ function StringTrimRight() {
function StringFromCharCode(code) {
var n = %_ArgumentsLength();
if (n == 1) {
- if (!%_IsSmi(code)) code = $toNumber(code);
+ if (!%_IsSmi(code)) code = ToNumber(code);
return %_StringCharFromCode(code & 0xffff);
}
@@ -831,7 +834,7 @@ function StringFromCharCode(code) {
var i;
for (i = 0; i < n; i++) {
var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = $toNumber(code) & 0xffff;
+ if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
if (code < 0) code = code & 0xffff;
if (code > 0xff) break;
%_OneByteSeqStringSetChar(i, code, one_byte);
@@ -842,7 +845,7 @@ function StringFromCharCode(code) {
var two_byte = %NewString(n - i, NEW_TWO_BYTE_STRING);
for (var j = 0; i < n; i++, j++) {
var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = $toNumber(code) & 0xffff;
+ if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
%_TwoByteSeqStringSetChar(j, code, two_byte);
}
return one_byte + two_byte;
@@ -1031,27 +1034,29 @@ function StringEndsWith(searchString /* position */) { // length == 1
function StringIncludes(searchString /* position */) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "String.prototype.includes");
- var s = TO_STRING_INLINE(this);
+ var string = TO_STRING_INLINE(this);
if (IS_REGEXP(searchString)) {
throw MakeTypeError(kFirstArgumentNotRegExp, "String.prototype.includes");
}
- var ss = TO_STRING_INLINE(searchString);
+ searchString = TO_STRING_INLINE(searchString);
var pos = 0;
if (%_ArgumentsLength() > 1) {
pos = %_Arguments(1); // position
- pos = $toInteger(pos);
+ pos = TO_INTEGER(pos);
}
- var s_len = s.length;
- var start = MathMin(MathMax(pos, 0), s_len);
- var ss_len = ss.length;
- if (ss_len + start > s_len) {
+ var stringLength = string.length;
+ if (pos < 0) pos = 0;
+ if (pos > stringLength) pos = stringLength;
+ var searchStringLength = searchString.length;
+
+ if (searchStringLength + pos > stringLength) {
return false;
}
- return %StringIndexOf(s, ss, start) !== -1;
+ return %StringIndexOf(string, searchString, pos) !== -1;
}
@@ -1086,7 +1091,7 @@ function StringFromCodePoint(_) { // length = 1
for (index = 0; index < length; index++) {
code = %_Arguments(index);
if (!%_IsSmi(code)) {
- code = $toNumber(code);
+ code = ToNumber(code);
}
if (code < 0 || code > 0x10FFFF || code !== TO_INTEGER(code)) {
throw MakeRangeError(kInvalidCodePoint, code);
@@ -1110,18 +1115,18 @@ function StringFromCodePoint(_) { // length = 1
function StringRaw(callSite) {
// TODO(caitp): Use rest parameters when implemented
var numberOfSubstitutions = %_ArgumentsLength();
- var cooked = $toObject(callSite);
- var raw = $toObject(cooked.raw);
+ var cooked = TO_OBJECT(callSite);
+ var raw = TO_OBJECT(cooked.raw);
var literalSegments = $toLength(raw.length);
if (literalSegments <= 0) return "";
- var result = $toString(raw[0]);
+ var result = ToString(raw[0]);
for (var i = 1; i < literalSegments; ++i) {
if (i < numberOfSubstitutions) {
- result += $toString(%_Arguments(i));
+ result += ToString(%_Arguments(i));
}
- result += $toString(raw[i]);
+ result += ToString(raw[i]);
}
return result;
@@ -1199,6 +1204,7 @@ utils.Export(function(to) {
to.StringLastIndexOf = StringLastIndexOfJS;
to.StringMatch = StringMatchJS;
to.StringReplace = StringReplace;
+ to.StringSlice = StringSlice;
to.StringSplit = StringSplitJS;
to.StringSubstr = StringSubstr;
to.StringSubstring = StringSubstring;
diff --git a/deps/v8/src/strings-storage.cc b/deps/v8/src/strings-storage.cc
index 533fa8959c..8ddf291fcc 100644
--- a/deps/v8/src/strings-storage.cc
+++ b/deps/v8/src/strings-storage.cc
@@ -80,7 +80,7 @@ const char* StringsStorage::GetName(Name* name) {
String* str = String::cast(name);
int length = Min(kMaxNameSize, str->length());
int actual_length = 0;
- SmartArrayPointer<char> data = str->ToCString(
+ base::SmartArrayPointer<char> data = str->ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length, &actual_length);
return AddOrDisposeString(data.Detach(), actual_length);
} else if (name->IsSymbol()) {
diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc
index ec26845f51..31dab94f12 100644
--- a/deps/v8/src/strtod.cc
+++ b/deps/v8/src/strtod.cc
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/strtod.h"
+
#include <stdarg.h>
#include <cmath>
-#include "src/v8.h"
-
#include "src/bignum.h"
#include "src/cached-powers.h"
#include "src/double.h"
#include "src/globals.h"
-#include "src/strtod.h"
#include "src/utils.h"
namespace v8 {
diff --git a/deps/v8/src/strtod.h b/deps/v8/src/strtod.h
index f4ce731a17..737b5484c5 100644
--- a/deps/v8/src/strtod.h
+++ b/deps/v8/src/strtod.h
@@ -5,6 +5,8 @@
#ifndef V8_STRTOD_H_
#define V8_STRTOD_H_
+#include "src/vector.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/symbol.js b/deps/v8/src/symbol.js
index 8ac7fe7011..8cac2c56a2 100644
--- a/deps/v8/src/symbol.js
+++ b/deps/v8/src/symbol.js
@@ -23,11 +23,12 @@ var $symbolToString;
var GlobalObject = global.Object;
var GlobalSymbol = global.Symbol;
-
var ObjectGetOwnPropertyKeys;
+var ToString;
utils.Import(function(from) {
ObjectGetOwnPropertyKeys = from.ObjectGetOwnPropertyKeys;
+ ToString = from.ToString;
});
// -------------------------------------------------------------------
@@ -35,7 +36,7 @@ utils.Import(function(from) {
function SymbolConstructor(x) {
if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Symbol");
// NOTE: Passing in a Symbol value will throw on ToString().
- return %CreateSymbol(IS_UNDEFINED(x) ? x : $toString(x));
+ return %CreateSymbol(IS_UNDEFINED(x) ? x : ToString(x));
}
@@ -78,7 +79,7 @@ function SymbolKeyFor(symbol) {
// ES6 19.1.2.8
function ObjectGetOwnPropertySymbols(obj) {
- obj = $toObject(obj);
+ obj = TO_OBJECT(obj);
// TODO(arv): Proxies use a shared trap for String and Symbol keys.
diff --git a/deps/v8/src/third_party/fdlibm/fdlibm.cc b/deps/v8/src/third_party/fdlibm/fdlibm.cc
index ea3efd35be..1d49de0248 100644
--- a/deps/v8/src/third_party/fdlibm/fdlibm.cc
+++ b/deps/v8/src/third_party/fdlibm/fdlibm.cc
@@ -13,11 +13,14 @@
// modified significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
-#include "src/v8.h"
-
-#include "src/double.h"
#include "src/third_party/fdlibm/fdlibm.h"
+#include <stdint.h>
+#include <cmath>
+#include <limits>
+
+#include "src/base/macros.h"
+#include "src/double.h"
namespace v8 {
namespace fdlibm {
diff --git a/deps/v8/src/third_party/vtune/v8vtune.gyp b/deps/v8/src/third_party/vtune/v8vtune.gyp
index 6adf365689..92df29a82b 100644
--- a/deps/v8/src/third_party/vtune/v8vtune.gyp
+++ b/deps/v8/src/third_party/vtune/v8vtune.gyp
@@ -37,6 +37,10 @@
'dependencies': [
'../../../tools/gyp/v8.gyp:v8',
],
+ 'defines': [
+ # TODO(jochen): Remove again after this is globally turned on.
+ 'V8_IMMINENT_DEPRECATION_WARNINGS',
+ ],
'sources': [
'ittnotify_config.h',
'ittnotify_types.h',
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.cc b/deps/v8/src/third_party/vtune/vtune-jit.cc
index b621cbcb8f..30f6196001 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.cc
+++ b/deps/v8/src/third_party/vtune/vtune-jit.cc
@@ -192,12 +192,13 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
jmethod.method_size = static_cast<unsigned int>(event->code_len);
jmethod.method_name = temp_method_name;
- Handle<UnboundScript> script = event->script;
+ Local<UnboundScript> script = event->script;
if (*script != NULL) {
// Get the source file name and set it to jmethod.source_file_name
if ((*script->GetScriptName())->IsString()) {
- Handle<String> script_name = script->GetScriptName()->ToString();
+ Local<String> script_name =
+ Local<String>::Cast(script->GetScriptName());
temp_file_name = new char[script_name->Utf8Length() + 1];
script_name->WriteUtf8(temp_file_name);
jmethod.source_file_name = temp_file_name;
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 09884d5066..9870e17d83 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -106,10 +106,9 @@ void TransitionArray::Insert(Handle<Map> map, Handle<Name> name,
}
// We're gonna need a bigger TransitionArray.
- Handle<TransitionArray> result =
- Allocate(map->GetIsolate(), new_nof,
- Map::SlackForArraySize(false, number_of_transitions,
- kMaxNumberOfTransitions));
+ Handle<TransitionArray> result = Allocate(
+ map->GetIsolate(), new_nof,
+ Map::SlackForArraySize(number_of_transitions, kMaxNumberOfTransitions));
// The map's transition array may have shrunk during the allocation above as
// it was weakly traversed, though it is guaranteed not to disappear. Trim the
@@ -256,8 +255,10 @@ void TransitionArray::PutPrototypeTransition(Handle<Map> map,
// Grow array by factor 2 up to MaxCachedPrototypeTransitions.
int new_capacity = Min(kMaxCachedPrototypeTransitions, transitions * 2);
if (new_capacity == capacity) return;
+ int grow_by = new_capacity - capacity;
- cache = FixedArray::CopySize(cache, header + new_capacity);
+ Isolate* isolate = map->GetIsolate();
+ cache = isolate->factory()->CopyFixedArrayAndGrow(cache, grow_by);
if (capacity < 0) {
// There was no prototype transitions array before, so the size
// couldn't be copied. Initialize it explicitly.
diff --git a/deps/v8/src/type-feedback-vector.cc b/deps/v8/src/type-feedback-vector.cc
index adf0a5078a..c93e620d7d 100644
--- a/deps/v8/src/type-feedback-vector.cc
+++ b/deps/v8/src/type-feedback-vector.cc
@@ -24,10 +24,8 @@ TypeFeedbackVector::VectorICKind TypeFeedbackVector::FromCodeKind(
case Code::KEYED_LOAD_IC:
return KindKeyedLoadIC;
case Code::STORE_IC:
- DCHECK(FLAG_vector_stores);
return KindStoreIC;
case Code::KEYED_STORE_IC:
- DCHECK(FLAG_vector_stores);
return KindKeyedStoreIC;
default:
// Shouldn't get here.
@@ -128,6 +126,26 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(Isolate* isolate,
// static
+int TypeFeedbackVector::PushAppliedArgumentsIndex() {
+ const int index_count = VectorICComputer::word_count(1);
+ return kReservedIndexCount + index_count;
+}
+
+
+// static
+Handle<TypeFeedbackVector> TypeFeedbackVector::CreatePushAppliedArgumentsVector(
+ Isolate* isolate) {
+ Code::Kind kinds[] = {Code::KEYED_LOAD_IC};
+ FeedbackVectorSpec spec(0, 1, kinds);
+ Handle<TypeFeedbackVector> feedback_vector =
+ isolate->factory()->NewTypeFeedbackVector(&spec);
+ DCHECK(PushAppliedArgumentsIndex() ==
+ feedback_vector->GetIndex(FeedbackVectorICSlot(0)));
+ return feedback_vector;
+}
+
+
+// static
Handle<TypeFeedbackVector> TypeFeedbackVector::Copy(
Isolate* isolate, Handle<TypeFeedbackVector> vector) {
Handle<TypeFeedbackVector> result;
@@ -225,6 +243,12 @@ void TypeFeedbackVector::ClearICSlotsImpl(SharedFunctionInfo* shared,
}
+// static
+Handle<TypeFeedbackVector> TypeFeedbackVector::DummyVector(Isolate* isolate) {
+ return Handle<TypeFeedbackVector>::cast(isolate->factory()->dummy_vector());
+}
+
+
Handle<FixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
Isolate* isolate = GetIsolate();
Handle<Object> feedback = handle(GetFeedback(), isolate);
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
index a6f72210fc..3c4c5e680a 100644
--- a/deps/v8/src/type-feedback-vector.h
+++ b/deps/v8/src/type-feedback-vector.h
@@ -19,24 +19,25 @@ namespace internal {
class FeedbackVectorSpec {
public:
- FeedbackVectorSpec() : slots_(0), has_ic_slot_(false) {}
- explicit FeedbackVectorSpec(int slots) : slots_(slots), has_ic_slot_(false) {}
- FeedbackVectorSpec(int slots, Code::Kind ic_slot_kind)
- : slots_(slots), has_ic_slot_(true), ic_kind_(ic_slot_kind) {}
+ FeedbackVectorSpec() : slots_(0), ic_slots_(0), ic_kinds_(NULL) {}
+ explicit FeedbackVectorSpec(int slots)
+ : slots_(slots), ic_slots_(0), ic_kinds_(NULL) {}
+ FeedbackVectorSpec(int slots, int ic_slots, Code::Kind* ic_slot_kinds)
+ : slots_(slots), ic_slots_(ic_slots), ic_kinds_(ic_slot_kinds) {}
int slots() const { return slots_; }
- int ic_slots() const { return has_ic_slot_ ? 1 : 0; }
+ int ic_slots() const { return ic_slots_; }
Code::Kind GetKind(int ic_slot) const {
- DCHECK(has_ic_slot_ && ic_slot == 0);
- return ic_kind_;
+ DCHECK(ic_slots_ > 0 && ic_slot < ic_slots_);
+ return ic_kinds_[ic_slot];
}
private:
int slots_;
- bool has_ic_slot_;
- Code::Kind ic_kind_;
+ int ic_slots_;
+ Code::Kind* ic_kinds_;
};
@@ -192,6 +193,13 @@ class TypeFeedbackVector : public FixedArray {
static Handle<TypeFeedbackVector> Copy(Isolate* isolate,
Handle<TypeFeedbackVector> vector);
+#ifdef OBJECT_PRINT
+ // For gdb debugging.
+ void Print();
+#endif // OBJECT_PRINT
+
+ DECLARE_PRINTER(TypeFeedbackVector)
+
// Clears the vector slots and the vector ic slots.
void ClearSlots(SharedFunctionInfo* shared) { ClearSlotsImpl(shared, true); }
void ClearSlotsAtGCTime(SharedFunctionInfo* shared) {
@@ -218,6 +226,21 @@ class TypeFeedbackVector : public FixedArray {
// garbage collection (e.g., for patching the cache).
static inline Object* RawUninitializedSentinel(Heap* heap);
+ static const int kDummyLoadICSlot = 0;
+ static const int kDummyKeyedLoadICSlot = 1;
+ static const int kDummyStoreICSlot = 2;
+ static const int kDummyKeyedStoreICSlot = 3;
+
+ static Handle<TypeFeedbackVector> DummyVector(Isolate* isolate);
+ static FeedbackVectorICSlot DummySlot(int dummyIndex) {
+ DCHECK(dummyIndex >= 0 && dummyIndex <= kDummyKeyedStoreICSlot);
+ return FeedbackVectorICSlot(dummyIndex);
+ }
+
+ static int PushAppliedArgumentsIndex();
+ static Handle<TypeFeedbackVector> CreatePushAppliedArgumentsVector(
+ Isolate* isolate);
+
private:
enum VectorICKind {
KindUnused = 0x0,
@@ -379,6 +402,10 @@ class LoadICNexus : public FeedbackNexus {
: FeedbackNexus(vector, slot) {
DCHECK(vector->GetKind(slot) == Code::LOAD_IC);
}
+ explicit LoadICNexus(Isolate* isolate)
+ : FeedbackNexus(TypeFeedbackVector::DummyVector(isolate),
+ TypeFeedbackVector::DummySlot(
+ TypeFeedbackVector::kDummyLoadICSlot)) {}
LoadICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK(vector->GetKind(slot) == Code::LOAD_IC);
@@ -425,6 +452,10 @@ class StoreICNexus : public FeedbackNexus {
: FeedbackNexus(vector, slot) {
DCHECK(vector->GetKind(slot) == Code::STORE_IC);
}
+ explicit StoreICNexus(Isolate* isolate)
+ : FeedbackNexus(TypeFeedbackVector::DummyVector(isolate),
+ TypeFeedbackVector::DummySlot(
+ TypeFeedbackVector::kDummyStoreICSlot)) {}
StoreICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK(vector->GetKind(slot) == Code::STORE_IC);
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index ba983d63a8..1dbe21c9fa 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -82,7 +82,8 @@ Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorICSlot slot) {
obj = cell->value();
}
- if (obj->IsJSFunction() || obj->IsAllocationSite() || obj->IsSymbol()) {
+ if (obj->IsJSFunction() || obj->IsAllocationSite() || obj->IsSymbol() ||
+ obj->IsSimd128Value()) {
return Handle<Object>(obj, isolate());
}
@@ -105,13 +106,15 @@ InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(TypeFeedbackId id) {
InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(
FeedbackVectorICSlot slot) {
- Code::Kind kind = feedback_vector_->GetKind(slot);
- if (kind == Code::LOAD_IC) {
- LoadICNexus nexus(feedback_vector_, slot);
- return nexus.StateFromFeedback();
- } else if (kind == Code::KEYED_LOAD_IC) {
- KeyedLoadICNexus nexus(feedback_vector_, slot);
- return nexus.StateFromFeedback();
+ if (!slot.IsInvalid()) {
+ Code::Kind kind = feedback_vector_->GetKind(slot);
+ if (kind == Code::LOAD_IC) {
+ LoadICNexus nexus(feedback_vector_, slot);
+ return nexus.StateFromFeedback();
+ } else if (kind == Code::KEYED_LOAD_IC) {
+ KeyedLoadICNexus nexus(feedback_vector_, slot);
+ return nexus.StateFromFeedback();
+ }
}
// If we can't find an IC, assume we've seen *something*, but we don't know
@@ -330,9 +333,11 @@ void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorICSlot slot,
Handle<Name> name,
SmallMapList* receiver_types) {
receiver_types->Clear();
- LoadICNexus nexus(feedback_vector_, slot);
- Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
- CollectReceiverTypes(&nexus, name, flags, receiver_types);
+ if (!slot.IsInvalid()) {
+ LoadICNexus nexus(feedback_vector_, slot);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+ CollectReceiverTypes(&nexus, name, flags, receiver_types);
+ }
}
@@ -340,10 +345,15 @@ void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
FeedbackVectorICSlot slot, SmallMapList* receiver_types, bool* is_string,
IcCheckType* key_type) {
receiver_types->Clear();
- KeyedLoadICNexus nexus(feedback_vector_, slot);
- CollectReceiverTypes<FeedbackNexus>(&nexus, receiver_types);
- *is_string = HasOnlyStringMaps(receiver_types);
- *key_type = nexus.FindFirstName() != NULL ? PROPERTY : ELEMENT;
+ if (slot.IsInvalid()) {
+ *is_string = false;
+ *key_type = ELEMENT;
+ } else {
+ KeyedLoadICNexus nexus(feedback_vector_, slot);
+ CollectReceiverTypes<FeedbackNexus>(&nexus, receiver_types);
+ *is_string = HasOnlyStringMaps(receiver_types);
+ *key_type = nexus.FindFirstName() != NULL ? PROPERTY : ELEMENT;
+ }
}
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index ce52cdf15c..d1f7ed1fdb 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -37,10 +37,12 @@ TYPED_ARRAYS(DECLARE_GLOBALS)
var MathMax;
var MathMin;
+var ToNumber;
utils.Import(function(from) {
MathMax = from.MathMax;
MathMin = from.MathMin;
+ ToNumber = from.ToNumber;
});
var InternalArray = utils.InternalArray;
@@ -330,6 +332,7 @@ function TypedArraySet(obj, offset) {
}
return;
}
+ l = $toLength(l);
if (intOffset + l > this.length) {
throw MakeRangeError(kTypedArraySetSourceTooLarge);
}
diff --git a/deps/v8/src/types-inl.h b/deps/v8/src/types-inl.h
index 762a11df30..084f5db812 100644
--- a/deps/v8/src/types-inl.h
+++ b/deps/v8/src/types-inl.h
@@ -69,13 +69,6 @@ bool TypeImpl<Config>::NowContains(i::Object* value) {
// static
template<class T>
-T* ZoneTypeConfig::null_handle() {
- return NULL;
-}
-
-
-// static
-template<class T>
T* ZoneTypeConfig::handle(T* type) {
return type;
}
@@ -283,13 +276,6 @@ void ZoneTypeConfig::range_set_double(ZoneTypeConfig::Range* range, int index,
// static
template<class T>
-i::Handle<T> HeapTypeConfig::null_handle() {
- return i::Handle<T>();
-}
-
-
-// static
-template<class T>
i::Handle<T> HeapTypeConfig::handle(T* type) {
return i::handle(type, i::HeapObject::cast(type)->GetIsolate());
}
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index 1c6b84e2dc..a904c6eefa 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -228,6 +228,9 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
}
case HEAP_NUMBER_TYPE:
return kNumber & kTaggedPointer;
+ case SIMD128_VALUE_TYPE:
+ // TODO(bbudge): Add type bits for SIMD value types.
+ return kAny;
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
case JS_OBJECT_TYPE:
@@ -273,6 +276,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case ACCESSOR_PAIR_TYPE:
case FIXED_ARRAY_TYPE:
case BYTE_ARRAY_TYPE:
+ case BYTECODE_ARRAY_TYPE:
case FOREIGN_TYPE:
case SCRIPT_TYPE:
case CODE_TYPE:
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 8d63908015..31ee95cbb4 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -6,8 +6,8 @@
#define V8_TYPES_H_
#include "src/conversions.h"
-#include "src/factory.h"
#include "src/handles.h"
+#include "src/objects.h"
#include "src/ostreams.h"
namespace v8 {
@@ -1003,7 +1003,7 @@ struct ZoneTypeConfig {
static const int kRangeStructTag = 0x1000;
- template<class T> static inline T* null_handle();
+ template<class T> static inline T* null_handle() { return nullptr; }
template<class T> static inline T* handle(T* type);
template<class T> static inline T* cast(Type* type);
@@ -1058,7 +1058,9 @@ struct HeapTypeConfig {
static const int kRangeStructTag = 0xffff;
- template<class T> static inline i::Handle<T> null_handle();
+ template<class T> static inline i::Handle<T> null_handle() {
+ return i::Handle<T>();
+ }
template<class T> static inline i::Handle<T> handle(T* type);
template<class T> static inline i::Handle<T> cast(i::Handle<Type> type);
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index 2f10328f09..204ace6c96 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -9,6 +9,7 @@
#include "src/ostreams.h"
#include "src/parser.h" // for CompileTimeValue; TODO(rossberg): should move
#include "src/scopes.h"
+#include "src/splay-tree-inl.h"
namespace v8 {
namespace internal {
@@ -36,13 +37,8 @@ void AstTyper::Run(CompilationInfo* info) {
AstTyper* visitor = new(info->zone()) AstTyper(info);
Scope* scope = info->scope();
- // Handle implicit declaration of the function name in named function
- // expressions before other declarations.
- if (scope->is_function_scope() && scope->function() != NULL) {
- RECURSE(visitor->VisitVariableDeclaration(scope->function()));
- }
RECURSE(visitor->VisitDeclarations(scope->declarations()));
- RECURSE(visitor->VisitStatements(info->function()->body()));
+ RECURSE(visitor->VisitStatements(info->literal()->body()));
}
#undef RECURSE
diff --git a/deps/v8/src/typing.h b/deps/v8/src/typing.h
index f9442e6007..f3ead18f99 100644
--- a/deps/v8/src/typing.h
+++ b/deps/v8/src/typing.h
@@ -5,8 +5,6 @@
#ifndef V8_TYPING_H_
#define V8_TYPING_H_
-#include "src/v8.h"
-
#include "src/allocation.h"
#include "src/ast.h"
#include "src/effects.h"
diff --git a/deps/v8/src/unique.h b/deps/v8/src/unique.h
index 61d57d01ff..68fb86956c 100644
--- a/deps/v8/src/unique.h
+++ b/deps/v8/src/unique.h
@@ -8,7 +8,7 @@
#include <ostream> // NOLINT(readability/streams)
#include "src/base/functional.h"
-#include "src/handles-inl.h" // TODO(everyone): Fix our inl.h crap
+#include "src/handles.h"
#include "src/objects-inl.h" // TODO(everyone): Fix our inl.h crap
#include "src/utils.h"
#include "src/zone.h"
diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js
index 4566a7cc96..bf3270f1d0 100644
--- a/deps/v8/src/uri.js
+++ b/deps/v8/src/uri.js
@@ -17,6 +17,11 @@
var GlobalObject = global.Object;
var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
+var ToString;
+
+utils.Import(function(from) {
+ ToString = from.ToString;
+});
// -------------------------------------------------------------------
// Define internal helper functions.
@@ -274,13 +279,13 @@ function Decode(uri, reserved) {
// ECMA-262 - B.2.1.
function URIEscapeJS(str) {
- var s = $toString(str);
+ var s = ToString(str);
return %URIEscape(s);
}
// ECMA-262 - B.2.2.
function URIUnescapeJS(str) {
- var s = $toString(str);
+ var s = ToString(str);
return %URIUnescape(s);
}
@@ -304,14 +309,14 @@ function URIDecode(uri) {
return false;
};
- var string = $toString(uri);
+ var string = ToString(uri);
return Decode(string, reservedPredicate);
}
// ECMA-262 - 15.1.3.2.
function URIDecodeComponent(component) {
var reservedPredicate = function(cc) { return false; };
- var string = $toString(component);
+ var string = ToString(component);
return Decode(string, reservedPredicate);
}
@@ -338,7 +343,7 @@ function URIEncode(uri) {
return false;
};
- var string = $toString(uri);
+ var string = ToString(uri);
return Encode(string, unescapePredicate);
}
@@ -359,7 +364,7 @@ function URIEncodeComponent(component) {
return false;
};
- var string = $toString(component);
+ var string = ToString(component);
return Encode(string, unescapePredicate);
}
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 9f502bde3e..bbfdc74ad2 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/utils.h"
+
#include <stdarg.h>
#include <sys/stat.h>
-#include "src/v8.h"
-
#include "src/base/functional.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
-#include "src/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index fcffebb344..bd6f2c2b28 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -8,11 +8,10 @@
#include "src/base/once.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
#include "src/frames.h"
-#include "src/heap/store-buffer.h"
#include "src/heap-profiler.h"
#include "src/hydrogen.h"
#include "src/isolate.h"
@@ -35,7 +34,6 @@ V8_DECLARE_ONCE(init_natives_once);
V8_DECLARE_ONCE(init_snapshot_once);
#endif
-v8::ArrayBuffer::Allocator* V8::array_buffer_allocator_ = NULL;
v8::Platform* V8::platform_ = NULL;
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 211f3c6141..23e1a1230c 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -27,23 +27,10 @@
// Basic includes
#include "include/v8.h"
-#include "include/v8-platform.h"
-#include "src/checks.h" // NOLINT
-#include "src/allocation.h" // NOLINT
-#include "src/assert-scope.h" // NOLINT
-#include "src/utils.h" // NOLINT
-#include "src/flags.h" // NOLINT
-#include "src/globals.h" // NOLINT
-
-// Objects & heap
-#include "src/objects-inl.h" // NOLINT
-#include "src/heap/spaces-inl.h" // NOLINT
-#include "src/heap/heap-inl.h" // NOLINT
-#include "src/heap/incremental-marking-inl.h" // NOLINT
-#include "src/heap/mark-compact-inl.h" // NOLINT
-#include "src/log-inl.h" // NOLINT
-#include "src/handles-inl.h" // NOLINT
-#include "src/types-inl.h" // NOLINT
+#include "src/allocation.h"
+
+// Objects
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -69,15 +56,6 @@ class V8 : public AllStatic {
// Support for entry hooking JITed code.
static void SetFunctionEntryHook(FunctionEntryHook entry_hook);
- static v8::ArrayBuffer::Allocator* ArrayBufferAllocator() {
- return array_buffer_allocator_;
- }
-
- static void SetArrayBufferAllocator(v8::ArrayBuffer::Allocator *allocator) {
- CHECK_NULL(array_buffer_allocator_);
- array_buffer_allocator_ = allocator;
- }
-
static void InitializePlatform(v8::Platform* platform);
static void ShutdownPlatform();
static v8::Platform* GetCurrentPlatform();
@@ -89,17 +67,10 @@ class V8 : public AllStatic {
static void InitializeOncePerProcessImpl();
static void InitializeOncePerProcess();
- // Allocator for external array buffers.
- static v8::ArrayBuffer::Allocator* array_buffer_allocator_;
// v8::Platform to use.
static v8::Platform* platform_;
};
-
-// JavaScript defines two kinds of 'nil'.
-enum NilValue { kNullValue, kUndefinedValue };
-
-
} } // namespace v8::internal
#endif // V8_V8_H_
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 92769e25d2..93636d008f 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -2,12 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $functionSourceString;
-var $globalEval;
-var $objectDefineOwnProperty;
-var $objectGetOwnPropertyDescriptor;
-var $toCompletePropertyDescriptor;
-
(function(global, utils) {
%CheckIsBootstrapping();
@@ -21,16 +15,24 @@ var GlobalFunction = global.Function;
var GlobalNumber = global.Number;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
-
var MathAbs;
var ProxyDelegateCallAndConstruct;
var ProxyDerivedHasOwnTrap;
var ProxyDerivedKeysTrap;
var StringIndexOf;
+var ToBoolean;
+var ToNumber;
+var ToString;
utils.Import(function(from) {
MathAbs = from.MathAbs;
StringIndexOf = from.StringIndexOf;
+ ToString = from.ToString;
+});
+
+utils.ImportNow(function(from) {
+ ToBoolean = from.ToBoolean;
+ ToNumber = from.ToNumber;
});
utils.ImportFromExperimental(function(from) {
@@ -139,9 +141,9 @@ utils.InstallFunctions(global, DONT_ENUM, [
// ECMA-262 - 15.2.4.2
function ObjectToString() {
- if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) return "[object Undefined]";
+ if (IS_UNDEFINED(this)) return "[object Undefined]";
if (IS_NULL(this)) return "[object Null]";
- var O = TO_OBJECT_INLINE(this);
+ var O = TO_OBJECT(this);
var builtinTag = %_ClassOf(O);
var tag;
@@ -168,14 +170,14 @@ function ObjectToLocaleString() {
// ECMA-262 - 15.2.4.4
function ObjectValueOf() {
- return TO_OBJECT_INLINE(this);
+ return TO_OBJECT(this);
}
// ECMA-262 - 15.2.4.5
function ObjectHasOwnProperty(value) {
var name = $toName(value);
- var object = TO_OBJECT_INLINE(this);
+ var object = TO_OBJECT(this);
if (%_IsJSProxy(object)) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
@@ -206,14 +208,14 @@ function ObjectPropertyIsEnumerable(V) {
var desc = GetOwnPropertyJS(this, P);
return IS_UNDEFINED(desc) ? false : desc.isEnumerable();
}
- return %IsPropertyEnumerable(TO_OBJECT_INLINE(this), P);
+ return %IsPropertyEnumerable(TO_OBJECT(this), P);
}
// Extensions for providing property getters and setters.
function ObjectDefineGetter(name, fun) {
var receiver = this;
- if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
receiver = %GlobalProxy(ObjectDefineGetter);
}
if (!IS_SPEC_FUNCTION(fun)) {
@@ -223,22 +225,22 @@ function ObjectDefineGetter(name, fun) {
desc.setGet(fun);
desc.setEnumerable(true);
desc.setConfigurable(true);
- DefineOwnProperty(TO_OBJECT_INLINE(receiver), $toName(name), desc, false);
+ DefineOwnProperty(TO_OBJECT(receiver), $toName(name), desc, false);
}
function ObjectLookupGetter(name) {
var receiver = this;
- if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
receiver = %GlobalProxy(ObjectLookupGetter);
}
- return %LookupAccessor(TO_OBJECT_INLINE(receiver), $toName(name), GETTER);
+ return %LookupAccessor(TO_OBJECT(receiver), $toName(name), GETTER);
}
function ObjectDefineSetter(name, fun) {
var receiver = this;
- if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
receiver = %GlobalProxy(ObjectDefineSetter);
}
if (!IS_SPEC_FUNCTION(fun)) {
@@ -248,21 +250,21 @@ function ObjectDefineSetter(name, fun) {
desc.setSet(fun);
desc.setEnumerable(true);
desc.setConfigurable(true);
- DefineOwnProperty(TO_OBJECT_INLINE(receiver), $toName(name), desc, false);
+ DefineOwnProperty(TO_OBJECT(receiver), $toName(name), desc, false);
}
function ObjectLookupSetter(name) {
var receiver = this;
- if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
receiver = %GlobalProxy(ObjectLookupSetter);
}
- return %LookupAccessor(TO_OBJECT_INLINE(receiver), $toName(name), SETTER);
+ return %LookupAccessor(TO_OBJECT(receiver), $toName(name), SETTER);
}
function ObjectKeys(obj) {
- obj = TO_OBJECT_INLINE(obj);
+ obj = TO_OBJECT(obj);
if (%_IsJSProxy(obj)) {
var handler = %GetHandler(obj);
var names = CallTrap0(handler, "keys", ProxyDerivedKeysTrap);
@@ -350,11 +352,11 @@ function ToPropertyDescriptor(obj) {
var desc = new PropertyDescriptor();
if ("enumerable" in obj) {
- desc.setEnumerable($toBoolean(obj.enumerable));
+ desc.setEnumerable(ToBoolean(obj.enumerable));
}
if ("configurable" in obj) {
- desc.setConfigurable($toBoolean(obj.configurable));
+ desc.setConfigurable(ToBoolean(obj.configurable));
}
if ("value" in obj) {
@@ -362,7 +364,7 @@ function ToPropertyDescriptor(obj) {
}
if ("writable" in obj) {
- desc.setWritable($toBoolean(obj.writable));
+ desc.setWritable(ToBoolean(obj.writable));
}
if ("get" in obj) {
@@ -579,7 +581,7 @@ function GetOwnPropertyJS(obj, v) {
// GetOwnProperty returns an array indexed by the constants
// defined in macros.py.
// If p is not a property on obj undefined is returned.
- var props = %GetOwnProperty(TO_OBJECT_INLINE(obj), p);
+ var props = %GetOwnProperty(TO_OBJECT(obj), p);
return ConvertDescriptorArrayToDescriptor(props);
}
@@ -590,7 +592,7 @@ function Delete(obj, p, should_throw) {
var desc = GetOwnPropertyJS(obj, p);
if (IS_UNDEFINED(desc)) return true;
if (desc.isConfigurable()) {
- %DeleteProperty(obj, p, 0);
+ %DeleteProperty_Sloppy(obj, p);
return true;
} else if (should_throw) {
throw MakeTypeError(kDefineDisallowed, p);
@@ -616,7 +618,7 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
var handler = %GetHandler(obj);
var result = CallTrap2(handler, "defineProperty", UNDEFINED, p, attributes);
- if (!$toBoolean(result)) {
+ if (!ToBoolean(result)) {
if (should_throw) {
throw MakeTypeError(kProxyHandlerReturned,
handler, "false", "defineProperty");
@@ -803,9 +805,9 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
function DefineArrayProperty(obj, p, desc, should_throw) {
// Step 3 - Special handling for array index.
if (!IS_SYMBOL(p)) {
- var index = $toUint32(p);
+ var index = TO_UINT32(p);
var emit_splice = false;
- if ($toString(index) == p && index != 4294967295) {
+ if (ToString(index) == p && index != 4294967295) {
var length = obj.length;
if (index >= length && %IsObserved(obj)) {
emit_splice = true;
@@ -868,7 +870,7 @@ function DefineOwnPropertyFromAPI(obj, p, value, desc) {
// ES6 section 19.1.2.9
function ObjectGetPrototypeOf(obj) {
- return %_GetPrototype(TO_OBJECT_INLINE(obj));
+ return %_GetPrototype(TO_OBJECT(obj));
}
// ES6 section 19.1.2.19.
@@ -889,7 +891,7 @@ function ObjectSetPrototypeOf(obj, proto) {
// ES6 section 19.1.2.6
function ObjectGetOwnPropertyDescriptor(obj, p) {
- var desc = GetOwnPropertyJS(TO_OBJECT_INLINE(obj), p);
+ var desc = GetOwnPropertyJS(TO_OBJECT(obj), p);
return FromPropertyDescriptor(desc);
}
@@ -899,7 +901,7 @@ function ToNameArray(obj, trap, includeSymbols) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError(kProxyNonObjectPropNames, trap, obj);
}
- var n = $toUint32(obj.length);
+ var n = TO_UINT32(obj.length);
var array = new GlobalArray(n);
var realLength = 0;
var names = { __proto__: null }; // TODO(rossberg): use sets once ready.
@@ -973,7 +975,7 @@ function ObjectGetOwnPropertyKeys(obj, filter) {
}
} else {
if (filter & PROPERTY_ATTRIBUTES_STRING) continue;
- name = $toString(name);
+ name = ToString(name);
}
if (seenKeys[name]) continue;
seenKeys[name] = true;
@@ -1001,7 +1003,7 @@ function OwnPropertyKeys(obj) {
// ES5 section 15.2.3.4.
function ObjectGetOwnPropertyNames(obj) {
- obj = TO_OBJECT_INLINE(obj);
+ obj = TO_OBJECT(obj);
// Special handling for proxies.
if (%_IsJSProxy(obj)) {
var handler = %GetHandler(obj);
@@ -1093,7 +1095,7 @@ function ObjectDefineProperties(obj, properties) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError(kCalledOnNonObject, "Object.defineProperties");
}
- var props = TO_OBJECT_INLINE(properties);
+ var props = TO_OBJECT(properties);
var names = GetOwnEnumerablePropertyNames(props);
var descriptors = new InternalArray();
for (var i = 0; i < names.length; i++) {
@@ -1263,7 +1265,7 @@ function ObjectIs(obj1, obj2) {
// ECMA-262, Edition 6, section B.2.2.1.1
function ObjectGetProto() {
- return %_GetPrototype(TO_OBJECT_INLINE(this));
+ return %_GetPrototype(TO_OBJECT(this));
}
@@ -1280,10 +1282,10 @@ function ObjectSetProto(proto) {
function ObjectConstructor(x) {
if (%_IsConstructCall()) {
if (x == null) return this;
- return TO_OBJECT_INLINE(x);
+ return TO_OBJECT(x);
} else {
if (x == null) return { };
- return TO_OBJECT_INLINE(x);
+ return TO_OBJECT(x);
}
}
@@ -1341,9 +1343,9 @@ utils.InstallFunctions(GlobalObject, DONT_ENUM, [
function BooleanConstructor(x) {
if (%_IsConstructCall()) {
- %_SetValueOf(this, $toBoolean(x));
+ %_SetValueOf(this, ToBoolean(x));
} else {
- return $toBoolean(x);
+ return ToBoolean(x);
}
}
@@ -1389,7 +1391,7 @@ utils.InstallFunctions(GlobalBoolean.prototype, DONT_ENUM, [
// Number
function NumberConstructor(x) {
- var value = %_ArgumentsLength() == 0 ? 0 : $toNumber(x);
+ var value = %_ArgumentsLength() == 0 ? 0 : ToNumber(x);
if (%_IsConstructCall()) {
%_SetValueOf(this, value);
} else {
@@ -1502,7 +1504,7 @@ function NumberToPrecisionJS(precision) {
// Get the value of this number in case it's an object.
x = %_ValueOf(this);
}
- if (IS_UNDEFINED(precision)) return $toString(%_ValueOf(this));
+ if (IS_UNDEFINED(precision)) return ToString(%_ValueOf(this));
var p = TO_INTEGER(precision);
if (NUMBER_IS_NAN(x)) return "NaN";
@@ -1722,9 +1724,9 @@ function NewFunctionString(args, function_token) {
var n = args.length;
var p = '';
if (n > 1) {
- p = $toString(args[0]);
+ p = ToString(args[0]);
for (var i = 1; i < n - 1; i++) {
- p += ',' + $toString(args[i]);
+ p += ',' + ToString(args[i]);
}
// If the formal parameters string include ) - an illegal
// character - it may make the combined function expression
@@ -1737,7 +1739,7 @@ function NewFunctionString(args, function_token) {
// comments we can include a trailing block comment to catch this.
p += '\n/' + '**/';
}
- var body = (n > 0) ? $toString(args[n - 1]) : '';
+ var body = (n > 0) ? ToString(args[n - 1]) : '';
return '(' + function_token + '(' + p + ') {\n' + body + '\n})';
}
@@ -1786,23 +1788,16 @@ function GetIterator(obj, method) {
// ----------------------------------------------------------------------------
// Exports
-$functionSourceString = FunctionSourceString;
-$globalEval = GlobalEval;
-$objectDefineOwnProperty = DefineOwnPropertyFromAPI;
-$objectGetOwnPropertyDescriptor = ObjectGetOwnPropertyDescriptor;
-$toCompletePropertyDescriptor = ToCompletePropertyDescriptor;
-
-utils.ObjectDefineProperties = ObjectDefineProperties;
-utils.ObjectDefineProperty = ObjectDefineProperty;
-
utils.Export(function(to) {
to.Delete = Delete;
+ to.FunctionSourceString = FunctionSourceString;
to.GetIterator = GetIterator;
to.GetMethod = GetMethod;
to.IsFinite = GlobalIsFinite;
to.IsNaN = GlobalIsNaN;
to.NewFunctionString = NewFunctionString;
to.NumberIsNaN = NumberIsNaN;
+ to.ObjectDefineProperties = ObjectDefineProperties;
to.ObjectDefineProperty = ObjectDefineProperty;
to.ObjectFreeze = ObjectFreezeJS;
to.ObjectGetOwnPropertyKeys = ObjectGetOwnPropertyKeys;
@@ -1814,4 +1809,11 @@ utils.Export(function(to) {
to.ToNameArray = ToNameArray;
});
+utils.ExportToRuntime(function(to) {
+ to.GlobalEval = GlobalEval;
+ to.ObjectDefineOwnProperty = DefineOwnPropertyFromAPI;
+ to.ObjectGetOwnPropertyDescriptor = ObjectGetOwnPropertyDescriptor;
+ to.ToCompletePropertyDescriptor = ToCompletePropertyDescriptor;
+});
+
})
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index e2d9d49fe1..45c1c8a25c 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -6,9 +6,9 @@
#include "src/api.h"
#include "src/bootstrapper.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/execution.h"
-#include "src/regexp-stack.h"
+#include "src/regexp/regexp-stack.h"
#include "src/v8threads.h"
namespace v8 {
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 18a45abd73..f4f7a7a917 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/variables.h"
#include "src/ast.h"
#include "src/scopes.h"
-#include "src/variables.h"
namespace v8 {
namespace internal {
@@ -24,7 +23,6 @@ const char* Variable::Mode2String(VariableMode mode) {
case DYNAMIC: return "DYNAMIC";
case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
- case INTERNAL: return "INTERNAL";
case TEMPORARY: return "TEMPORARY";
}
UNREACHABLE();
@@ -58,7 +56,9 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
bool Variable::IsGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
// activation frame.
- return IsDynamicVariableMode(mode_) || IsStaticGlobalObjectProperty();
+ return (IsDynamicVariableMode(mode_) ||
+ (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_))) &&
+ scope_ != NULL && scope_->is_script_scope() && !is_this();
}
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index deebc5f80c..dcd2e6af6e 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -44,7 +44,6 @@ class Variable: public ZoneObject {
return force_context_allocation_;
}
void ForceContextAllocation() {
- DCHECK(mode_ != TEMPORARY);
force_context_allocation_ = true;
}
bool is_used() { return is_used_; }
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 90deaba4fe..74d5d2436a 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -8,7 +8,7 @@
#include "src/x64/assembler-x64.h"
#include "src/base/cpu.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/v8memory.h"
namespace v8 {
@@ -293,11 +293,6 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
-Address Assembler::break_address_from_return_address(Address pc) {
- return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
-}
-
-
Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
return code_targets_[Memory::int32_at(pc)];
}
@@ -311,21 +306,17 @@ Address Assembler::runtime_entry_at(Address pc) {
// Implementation of RelocInfo
// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
- bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH;
- if (IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- Memory::Address_at(pc_) += delta;
- if (flush_icache) CpuFeatures::FlushICache(pc_, sizeof(Address));
- } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
+void RelocInfo::apply(intptr_t delta) {
+ if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
- if (flush_icache) CpuFeatures::FlushICache(pc_, sizeof(int32_t));
- } else if (rmode_ == CODE_AGE_SEQUENCE) {
+ } else if (IsCodeAgeSequence(rmode_)) {
if (*pc_ == kCallOpcode) {
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= static_cast<int32_t>(delta); // Relocate entry.
- if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
}
+ } else if (IsInternalReference(rmode_)) {
+ // absolute code pointer inside code object moves with the code object.
+ Memory::Address_at(pc_) += delta;
}
}
@@ -526,21 +517,18 @@ void RelocInfo::set_code_age_stub(Code* stub,
}
-Address RelocInfo::call_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Memory::Address_at(
- pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
+Address RelocInfo::debug_call_address() {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ return Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset);
}
-void RelocInfo::set_call_address(Address target) {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
+void RelocInfo::set_debug_call_address(Address target) {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset) =
target;
- CpuFeatures::FlushICache(
- pc_ + Assembler::kRealPatchReturnSequenceAddressOffset, sizeof(Address));
+ CpuFeatures::FlushICache(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset,
+ sizeof(Address));
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -549,24 +537,6 @@ void RelocInfo::set_call_address(Address target) {
}
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(
- pc_ + Assembler::kPatchReturnSequenceAddressOffset);
-}
-
-
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
@@ -582,11 +552,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- isolate->debug()->has_break_points()) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
@@ -610,11 +577,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 15c531960f..47e4d2bdda 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -556,9 +556,6 @@ class Assembler : public AssemblerBase {
// of that call in the instruction stream.
static inline Address target_address_from_return_address(Address pc);
- // Return the code target address of the patch debug break slot
- inline static Address break_address_from_return_address(Address pc);
-
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -599,23 +596,11 @@ class Assembler : public AssemblerBase {
kMoveAddressIntoScratchRegisterInstructionLength +
kCallScratchRegisterInstructionLength;
- // The js return and debug break slot must be able to contain an indirect
- // call sequence, some x64 JS code is padded with int3 to make it large
- // enough to hold an instruction when the debugger patches it.
- static const int kJSReturnSequenceLength = kCallSequenceLength;
+ // The debug break slot must be able to contain an indirect call sequence.
static const int kDebugBreakSlotLength = kCallSequenceLength;
- static const int kPatchDebugBreakSlotReturnOffset = kCallTargetAddressOffset;
- // Distance between the start of the JS return sequence and where the
- // 32-bit displacement of a short call would be. The short call is from
- // SetDebugBreakAtIC from debug-x64.cc.
- static const int kPatchReturnSequenceAddressOffset =
- kJSReturnSequenceLength - kPatchDebugBreakSlotReturnOffset;
- // Distance between the start of the JS return sequence and where the
- // 32-bit displacement of a short call would be. The short call is from
- // SetDebugBreakAtIC from debug-x64.cc.
+ // Distance between start of patched debug break slot and the emitted address
+ // to jump to.
static const int kPatchDebugBreakSlotAddressOffset =
- kDebugBreakSlotLength - kPatchDebugBreakSlotReturnOffset;
- static const int kRealPatchReturnSequenceAddressOffset =
kMoveAddressIntoScratchRegisterInstructionLength - kPointerSize;
// One byte opcode for test eax,0xXXXXXXXX.
@@ -1614,11 +1599,11 @@ class Assembler : public AssemblerBase {
return pc_offset() - label->pos();
}
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
+ // Mark generator continuation.
+ void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot();
+ void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index fa957a1b67..421368a5d1 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/code-factory.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
namespace v8 {
namespace internal {
@@ -99,44 +97,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
-static void Generate_Runtime_NewObject(MacroAssembler* masm,
- bool create_memento,
- Register original_constructor,
- Label* count_incremented,
- Label* allocated) {
- int offset = 0;
- if (create_memento) {
- // Get the cell or allocation site.
- __ movp(rdi, Operand(rsp, kPointerSize * 2));
- __ Push(rdi);
- offset = kPointerSize;
- }
-
- // Must restore rsi (context) and rdi (constructor) before calling runtime.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movp(rdi, Operand(rsp, offset));
- __ Push(rdi);
- __ Push(original_constructor);
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ CallRuntime(Runtime::kNewObject, 2);
- }
- __ movp(rbx, rax); // store result in rbx
-
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- if (create_memento) {
- __ jmp(count_incremented);
- } else {
- __ jmp(allocated);
- }
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- rax: number of arguments
@@ -152,38 +114,28 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
- if (create_memento) {
- __ AssertUndefinedOrAllocationSite(rbx);
- __ Push(rbx);
- }
-
// Preserve the incoming parameters on the stack.
+ __ AssertUndefinedOrAllocationSite(rbx);
+ __ Push(rbx);
__ Integer32ToSmi(rax, rax);
__ Push(rax);
__ Push(rdi);
- if (use_new_target) {
- __ Push(rdx);
- }
-
- Label rt_call, normal_new, allocated, count_incremented;
- __ cmpp(rdx, rdi);
- __ j(equal, &normal_new);
-
- Generate_Runtime_NewObject(masm, create_memento, rdx, &count_incremented,
- &allocated);
+ __ Push(rdx);
- __ bind(&normal_new);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
if (FLAG_inline_new) {
- Label undo_allocation;
-
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ Move(kScratchRegister, debug_step_in_fp);
__ cmpp(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
+ // Fall back to runtime if the original constructor and function differ.
+ __ cmpp(rdx, rdi);
+ __ j(not_equal, &rt_call);
+
// Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
// rdi: constructor
@@ -220,12 +172,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ j(not_equal, &allocate);
__ Push(rax);
+ __ Push(rdx);
__ Push(rdi);
__ Push(rdi); // constructor
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ Pop(rdi);
+ __ Pop(rdx);
__ Pop(rax);
__ movl(rsi, Immediate(Map::kSlackTrackingCounterEnd - 1));
@@ -269,8 +223,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ j(less, &no_inobject_slack_tracking);
// Allocate object with a slack.
- __ movzxbp(rsi,
- FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ movzxbp(
+ rsi,
+ FieldOperand(
+ rax, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
+ __ movzxbp(rax, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ subp(rsi, rax);
__ leap(rsi,
Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
// rsi: offset of first field after pre-allocated fields
@@ -294,104 +252,59 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Move(Operand(rsi, AllocationMemento::kMapOffset),
factory->allocation_memento_map());
// Get the cell or undefined.
- __ movp(rdx, Operand(rsp, kPointerSize*2));
+ __ movp(rdx, Operand(rsp, 3 * kPointerSize));
+ __ AssertUndefinedOrAllocationSite(rdx);
__ movp(Operand(rsi, AllocationMemento::kAllocationSiteOffset), rdx);
} else {
__ InitializeFieldsWithFiller(rcx, rdi, rdx);
}
// Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
+ // and jump into the continuation code at any time from now on.
+ // rbx: JSObject (untagged)
__ orp(rbx, Immediate(kHeapObjectTag));
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- // Calculate total properties described map.
- __ movzxbp(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ movzxbp(rcx,
- FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ addp(rdx, rcx);
- // Calculate unused properties past the end of the in-object properties.
- __ movzxbp(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
- __ subp(rdx, rcx);
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, kPropertyAllocationCountFailed);
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // rbx: JSObject
- // rdi: start of next object (will be start of FixedArray)
- // rdx: number of elements in properties array
- __ Allocate(FixedArray::kHeaderSize,
- times_pointer_size,
- rdx,
- rdi,
- rax,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // rbx: JSObject
- // rdi: FixedArray
- // rdx: number of elements
- // rax: start of next object
- __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movp(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
- __ Integer32ToSmi(rdx, rdx);
- __ movp(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
-
- // Initialize the fields to undefined.
- // rbx: JSObject
- // rdi: FixedArray
- // rax: start of next object
- // rdx: number of elements
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ leap(rcx, Operand(rdi, FixedArray::kHeaderSize));
- __ InitializeFieldsWithFiller(rcx, rax, rdx);
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // rbx: JSObject
- // rdi: FixedArray
- __ orp(rdi, Immediate(kHeapObjectTag)); // add the heap tag
- __ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
-
-
// Continue with JSObject being successfully allocated
- // rbx: JSObject
+ // rbx: JSObject (tagged)
__ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // rbx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(rbx);
}
// Allocate the new receiver object using the runtime call.
- // rdi: function (constructor)
+ // rdx: original constructor
__ bind(&rt_call);
- Generate_Runtime_NewObject(masm, create_memento, rdi, &count_incremented,
- &allocated);
+ int offset = kPointerSize;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ movp(rdi, Operand(rsp, kPointerSize * 3));
+ __ Push(rdi); // argument 1: allocation site
+ offset += kPointerSize;
+ }
+
+ // Must restore rsi (context) and rdi (constructor) before calling runtime.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movp(rdi, Operand(rsp, offset));
+ __ Push(rdi); // argument 2/1: constructor function
+ __ Push(rdx); // argument 3/2: original constructor
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ movp(rbx, rax); // store result in rbx
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
// New object allocated.
// rbx: newly allocated object
__ bind(&allocated);
if (create_memento) {
- int offset = (use_new_target ? 3 : 2) * kPointerSize;
- __ movp(rcx, Operand(rsp, offset));
+ __ movp(rcx, Operand(rsp, 3 * kPointerSize));
__ Cmp(rcx, masm->isolate()->factory()->undefined_value());
__ j(equal, &count_incremented);
// rcx is an AllocationSite. We are creating a memento from it, so we
@@ -403,9 +316,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore the parameters.
- if (use_new_target) {
- __ Pop(rdx);
- }
+ __ Pop(rdx);
__ Pop(rdi);
// Retrieve smi-tagged arguments count from the stack.
@@ -414,9 +325,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Push new.target onto the construct frame. This is stored just below the
// receiver on the stack.
- if (use_new_target) {
- __ Push(rdx);
- }
+ __ Push(rdx);
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
@@ -449,9 +358,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- // TODO(arv): Remove the "!use_new_target" before supporting optimization
- // of functions that reference new.target
- if (!is_api_function && !use_new_target) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -479,8 +386,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Restore the arguments count and leave the construct frame. The arguments
// count is stored below the reciever and the new.target.
__ bind(&exit);
- int offset = (use_new_target ? 2 : 1) * kPointerSize;
- __ movp(rbx, Operand(rsp, offset));
+ __ movp(rbx, Operand(rsp, 2 * kPointerSize));
// Leave construct frame.
}
@@ -497,17 +403,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, true, false);
}
@@ -518,12 +419,14 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// -- rbx: allocation site or undefined
// -- rdx: original constructor
// -----------------------------------
- // TODO(dslomov): support pretenuring
- CHECK(!FLAG_pretenuring_call_new);
{
FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+ // Preserve allocation site.
+ __ AssertUndefinedOrAllocationSite(rbx);
+ __ Push(rbx);
+
// Store a smi-tagged arguments count on the stack.
__ Integer32ToSmi(rax, rax);
__ Push(rax);
@@ -764,6 +667,147 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o rdi: the JS function object being called
+// o rsi: our context
+// o rbp: the caller's frame pointer
+// o rsp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-x64.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ pushq(rbp); // Caller's frame pointer.
+ __ movp(rbp, rsp);
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS function.
+
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
+ __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(kInterpreterBytecodeArrayRegister,
+ FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+ __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+ rax);
+ __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ movl(rcx, FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ movp(rdx, rsp);
+ __ subp(rdx, rcx);
+ __ CompareRoot(rdx, Heap::kRealStackLimitRootIndex);
+ __ j(above_equal, &ok, Label::kNear);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ Label loop_header;
+ Label loop_check;
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(always, &loop_check);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ Push(rdx);
+ // Continue loop if not done.
+ __ bind(&loop_check);
+ __ subp(rcx, Immediate(kPointerSize));
+ __ j(greater_equal, &loop_header, Label::kNear);
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok, Label::kNear);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ bind(&ok);
+ }
+
+ // Load accumulator, register file, bytecode offset, dispatch table into
+ // registers.
+ __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ movp(kInterpreterRegisterFileRegister, rbp);
+ __ subp(
+ kInterpreterRegisterFileRegister,
+ Immediate(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ movp(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ addp(kInterpreterDispatchTableRegister,
+ Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Dispatch to the first bytecode handler for the function.
+ __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
+ times_pointer_size, 0));
+ // TODO(rmcilroy): Make dispatch table point to code entries to avoid
+ // untagging and header removal.
+ __ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(rbx);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // The return value is in accumulator, which is already in rax.
+
+ // Leave the frame (also dropping the register file).
+ __ leave();
+ // Return dropping receiver + arguments.
+ // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Ret(1 * kPointerSize, rcx);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -1024,8 +1068,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Integer32ToSmi(rax, rax);
__ Push(rax);
- __ Push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movp(rax, rbx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ movp(rbx, rax);
__ Set(rdx, 0); // indicate regular JS_FUNCTION
@@ -1120,6 +1165,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int vectorOffset,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
@@ -1136,12 +1182,9 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ movp(receiver, Operand(rbp, argumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
- FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
- Handle<TypeFeedbackVector> feedback_vector =
- masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
- int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
- __ Move(slot, Smi::FromInt(index));
- __ Move(vector, feedback_vector);
+ int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
+ __ Move(slot, Smi::FromInt(slot_index));
+ __ movp(vector, Operand(rbp, vectorOffset));
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1188,6 +1231,13 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
static const int kFunctionOffset = kReceiverOffset + kPointerSize;
+ static const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ movp(rdi, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdi, FieldOperand(rdi, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(rdi);
__ Push(Operand(rbp, kFunctionOffset));
__ Push(Operand(rbp, kArgumentsOffset));
@@ -1199,9 +1249,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
Generate_CheckStackOverflow(masm, kFunctionOffset, kRaxIsSmiTagged);
- // Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ // Push current index and limit, and receiver.
+ const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
__ Push(rax); // limit
__ Push(Immediate(0)); // index
@@ -1245,8 +1294,9 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
// Convert the receiver to an object.
__ bind(&call_to_object);
- __ Push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ movp(rax, rbx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ movp(rbx, rax);
__ jmp(&push_receiver, Label::kNear);
@@ -1260,8 +1310,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ Push(rbx);
// Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
@@ -1311,6 +1361,14 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
static const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
static const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+ static const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ movp(rdi, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdi, FieldOperand(rdi, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ Push(rdi);
+
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
__ movp(rax, Operand(rbp, kNewTargetOffset));
@@ -1329,29 +1387,26 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
Generate_CheckStackOverflow(masm, kFunctionOffset, kRaxIsSmiTagged);
// Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
__ Push(rax); // limit
__ Push(Immediate(0)); // index
- // Push newTarget and callee functions
- __ Push(Operand(rbp, kNewTargetOffset));
+ // Push the constructor function as callee.
__ Push(Operand(rbp, kFunctionOffset));
// Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Use undefined feedback vector
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
__ movp(rdi, Operand(rbp, kFunctionOffset));
+ __ movp(rcx, Operand(rbp, kNewTargetOffset));
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
// Leave internal frame.
}
// remove this, target, arguments and newTarget
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index f467ea357f..1416fbd8b9 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/bootstrapper.h"
@@ -13,8 +11,8 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -32,7 +30,7 @@ static void InitializeArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -48,7 +46,7 @@ static void InitializeInternalArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -592,7 +590,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ PopReturnAddressTo(rbx);
__ Push(rdx);
__ PushReturnAddressFrom(rbx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments, 1, 1);
}
@@ -888,10 +886,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ PushReturnAddressFrom(scratch);
// Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1576,6 +1571,9 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpb(rcx, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
__ j(equal, &runtime_call, Label::kFar);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ cmpb(rcx, Immediate(static_cast<uint8_t>(SIMD128_VALUE_TYPE)));
+ __ j(equal, &runtime_call, Label::kFar);
if (is_strong(strength())) {
// We have already tested for smis and heap numbers, so if both
// arguments are not strings we must proceed to the slow case.
@@ -1763,31 +1761,38 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(rax);
// Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc == equal) {
- builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == equal && strict()) {
+ __ PushReturnAddressFrom(rcx);
+ __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
} else {
- builtin =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
- }
+ Builtins::JavaScript builtin;
+ if (cc == equal) {
+ builtin = Builtins::EQUALS;
+ } else {
+ builtin =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
+ __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
+ }
- __ PushReturnAddressFrom(rcx);
+ __ PushReturnAddressFrom(rcx);
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+ }
__ bind(&miss);
GenerateMiss(masm);
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
- // eax : number of arguments to the construct function
- // ebx : Feedback vector
- // edx : slot in feedback vector (Smi)
- // edi : the function to call
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
+ bool is_super) {
+ // rax : number of arguments to the construct function
+ // rbx : feedback vector
+ // rcx : original constructor (for IsSuperConstructorCall)
+ // rdx : slot in feedback vector (Smi)
+ // rdi : the function to call
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
@@ -1797,9 +1802,15 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
__ Integer32ToSmi(rdx, rdx);
__ Push(rdx);
__ Push(rbx);
+ if (is_super) {
+ __ Push(rcx);
+ }
__ CallStub(stub);
+ if (is_super) {
+ __ Pop(rcx);
+ }
__ Pop(rbx);
__ Pop(rdx);
__ Pop(rdi);
@@ -1808,39 +1819,40 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rax : number of arguments to the construct function
- // rbx : Feedback vector
+ // rbx : feedback vector
+ // rcx : original constructor (for IsSuperConstructorCall)
// rdx : slot in feedback vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function,
done_no_smi_convert;
- // Load the cache state into rcx.
+ // Load the cache state into r11.
__ SmiToInteger32(rdx, rdx);
- __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize));
+ __ movp(r11,
+ FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- // We don't know if rcx is a WeakCell or a Symbol, but it's harmless to read
+ // We don't know if r11 is a WeakCell or a Symbol, but it's harmless to read
// at this position in a symbol (see static asserts in
// type-feedback-vector.h).
Label check_allocation_site;
- __ cmpp(rdi, FieldOperand(rcx, WeakCell::kValueOffset));
+ __ cmpp(rdi, FieldOperand(r11, WeakCell::kValueOffset));
__ j(equal, &done, Label::kFar);
- __ CompareRoot(rcx, Heap::kmegamorphic_symbolRootIndex);
+ __ CompareRoot(r11, Heap::kmegamorphic_symbolRootIndex);
__ j(equal, &done, Label::kFar);
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ __ CompareRoot(FieldOperand(r11, HeapObject::kMapOffset),
Heap::kWeakCellMapRootIndex);
__ j(not_equal, FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
- __ CheckSmi(FieldOperand(rcx, WeakCell::kValueOffset));
+ __ CheckSmi(FieldOperand(r11, WeakCell::kValueOffset));
__ j(equal, &initialize);
__ jmp(&megamorphic);
@@ -1850,12 +1862,12 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the slot either some other function or an
// AllocationSite.
- __ CompareRoot(FieldOperand(rcx, 0), Heap::kAllocationSiteMapRootIndex);
+ __ CompareRoot(FieldOperand(r11, 0), Heap::kAllocationSiteMapRootIndex);
__ j(not_equal, &miss);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
- __ cmpp(rdi, rcx);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
+ __ cmpp(rdi, r11);
__ j(not_equal, &megamorphic);
__ jmp(&done);
}
@@ -1864,7 +1876,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ CompareRoot(rcx, Heap::kuninitialized_symbolRootIndex);
+ __ CompareRoot(r11, Heap::kuninitialized_symbolRootIndex);
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
@@ -1879,19 +1891,19 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
if (!FLAG_pretenuring_call_new) {
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
- __ cmpp(rdi, rcx);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
+ __ cmpp(rdi, r11);
__ j(not_equal, &not_array_function);
CreateAllocationSiteStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ jmp(&done_no_smi_convert);
__ bind(&not_array_function);
}
CreateWeakCellStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ jmp(&done_no_smi_convert);
__ bind(&done);
@@ -1955,8 +1967,8 @@ static void EmitWrapCase(MacroAssembler* masm,
// Wrap the receiver and patch it back onto the stack.
{ FrameScope frame_scope(masm, StackFrame::INTERNAL);
__ Push(rdi);
- __ Push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(rdi);
}
__ movp(args->GetReceiverOperand(), rax);
@@ -2029,19 +2041,19 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// rax : number of arguments
// rbx : feedback vector
- // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // rcx : original constructor (for IsSuperConstructorCall)
+ // rdx : slot in feedback vector (Smi, for RecordCallTarget)
// rdi : constructor function
Label slow, non_function_call;
// Check that function is not a smi.
__ JumpIfSmi(rdi, &non_function_call);
// Check that function is a JSFunction.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, r11);
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ SmiToInteger32(rdx, rdx);
if (FLAG_pretenuring_call_new) {
@@ -2066,7 +2078,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// Pass original constructor to construct stub.
if (IsSuperConstructorCall()) {
- __ movp(rdx, Operand(rsp, rax, times_pointer_size, 2 * kPointerSize));
+ __ movp(rdx, rcx);
} else {
__ movp(rdx, rdi);
}
@@ -2081,10 +2093,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// rdi: called object
// rax: number of arguments
- // rcx: object map
+ // r11: object map
Label do_call;
__ bind(&slow);
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+ __ CmpInstanceType(r11, JS_FUNCTION_PROXY_TYPE);
__ j(not_equal, &non_function_call);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
@@ -2321,11 +2333,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(rdx);
// Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
-
- ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
- __ CallExternalReference(miss, 3);
+ Runtime::FunctionId id = GetICState() == DEFAULT
+ ? Runtime::kCallIC_Miss
+ : Runtime::kCallIC_Customization_Miss;
+ __ CallRuntime(id, 3);
// Move result to edi and exit the internal frame.
__ movp(rdi, rax);
@@ -3263,7 +3274,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
__ bind(&single_char);
// rax: string
@@ -3500,7 +3511,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -3798,7 +3809,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ bind(&miss);
@@ -3851,16 +3862,13 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
-
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(rdx);
__ Push(rax);
__ Push(rdx);
__ Push(rax);
__ Push(Smi::FromInt(op()));
- __ CallExternalReference(miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss, 3);
// Compute the entry point of the rewritten stub.
__ leap(rdi, FieldOperand(rax, Code::kHeaderSize));
@@ -3896,12 +3904,12 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ leap(index, Operand(index, index, times_2, 0)); // index *= 3.
Register entity_name = r0;
// Having undefined at this place means the name is not contained.
- DCHECK_EQ(kSmiTagSize, 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
__ movp(entity_name, Operand(properties,
index,
times_pointer_size,
@@ -3967,7 +3975,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ andp(r1, r0);
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ leap(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
// Check if the key is identical to the name.
@@ -4029,7 +4037,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ andp(scratch, Operand(rsp, 0));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ leap(index(), Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
@@ -4493,7 +4501,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::LOAD_IC, code_flags, false, receiver, name, feedback, no_reg);
+ masm, Code::LOAD_IC, code_flags, receiver, name, feedback, no_reg);
__ bind(&miss);
LoadIC::GenerateMiss(masm);
@@ -4726,12 +4734,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
- DCHECK(FAST_SMI_ELEMENTS == 0);
- DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
- DCHECK(FAST_ELEMENTS == 2);
- DCHECK(FAST_HOLEY_ELEMENTS == 3);
- DCHECK(FAST_DOUBLE_ELEMENTS == 4);
- DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
__ testb(rdx, Immediate(1));
@@ -5028,6 +5036,161 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context_reg = rsi;
+ Register slot_reg = rbx;
+ Register result_reg = rax;
+ Label slow_case;
+
+ // Go up context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ movp(rdi, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ context_reg = rdi;
+ }
+
+ // Load the PropertyCell value at the specified slot.
+ __ movp(result_reg, ContextOperand(context_reg, slot_reg));
+ __ movp(result_reg, FieldOperand(result_reg, PropertyCell::kValueOffset));
+
+ // Check that value is not the_hole.
+ __ CompareRoot(result_reg, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &slow_case, Label::kNear);
+ __ Ret();
+
+ // Fallback to the runtime.
+ __ bind(&slow_case);
+ __ Integer32ToSmi(slot_reg, slot_reg);
+ __ PopReturnAddressTo(kScratchRegister);
+ __ Push(slot_reg);
+ __ Push(kScratchRegister);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+}
+
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context_reg = rsi;
+ Register slot_reg = rbx;
+ Register value_reg = rax;
+ Register cell_reg = r8;
+ Register cell_details_reg = rdx;
+ Register cell_value_reg = r9;
+ Label fast_heapobject_case, fast_smi_case, slow_case;
+
+ if (FLAG_debug_code) {
+ __ CompareRoot(value_reg, Heap::kTheHoleValueRootIndex);
+ __ Check(not_equal, kUnexpectedValue);
+ }
+
+ // Go up context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ movp(rdi, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ context_reg = rdi;
+ }
+
+ // Load the PropertyCell at the specified slot.
+ __ movp(cell_reg, ContextOperand(context_reg, slot_reg));
+
+ // Load PropertyDetails for the cell (actually only the cell_type, kind and
+ // READ_ONLY bit of attributes).
+ __ SmiToInteger32(cell_details_reg,
+ FieldOperand(cell_reg, PropertyCell::kDetailsOffset));
+ __ andl(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::kMask |
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask));
+
+ // Check if PropertyCell holds mutable data.
+ Label not_mutable_data;
+ __ cmpl(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kMutable) |
+ PropertyDetails::KindField::encode(kData)));
+ __ j(not_equal, &not_mutable_data);
+ __ JumpIfSmi(value_reg, &fast_smi_case);
+ __ bind(&fast_heapobject_case);
+ __ movp(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
+ __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
+ cell_value_reg, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // RecordWriteField clobbers the value register, so we need to reload.
+ __ movp(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
+ __ Ret();
+ __ bind(&not_mutable_data);
+
+ // Check if PropertyCell value matches the new value (relevant for Constant,
+ // ConstantType and Undefined cells).
+ Label not_same_value;
+ __ movp(cell_value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
+ __ cmpp(cell_value_reg, value_reg);
+ __ j(not_equal, &not_same_value,
+ FLAG_debug_code ? Label::kFar : Label::kNear);
+ // Make sure the PropertyCell is not marked READ_ONLY.
+ __ testl(cell_details_reg,
+ Immediate(PropertyDetails::kAttributesReadOnlyMask));
+ __ j(not_zero, &slow_case);
+ if (FLAG_debug_code) {
+ Label done;
+ // This can only be true for Constant, ConstantType and Undefined cells,
+ // because we never store the_hole via this stub.
+ __ cmpl(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstant) |
+ PropertyDetails::KindField::encode(kData)));
+ __ j(equal, &done);
+ __ cmpl(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ j(equal, &done);
+ __ cmpl(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kUndefined) |
+ PropertyDetails::KindField::encode(kData)));
+ __ Check(equal, kUnexpectedValue);
+ __ bind(&done);
+ }
+ __ Ret();
+ __ bind(&not_same_value);
+
+ // Check if PropertyCell contains data with constant type (and is not
+ // READ_ONLY).
+ __ cmpl(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ j(not_equal, &slow_case, Label::kNear);
+
+ // Now either both old and new values must be SMIs or both must be heap
+ // objects with same map.
+ Label value_is_heap_object;
+ __ JumpIfNotSmi(value_reg, &value_is_heap_object, Label::kNear);
+ __ JumpIfNotSmi(cell_value_reg, &slow_case, Label::kNear);
+ // Old and new values are SMIs, no need for a write barrier here.
+ __ bind(&fast_smi_case);
+ __ movp(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
+ __ Ret();
+ __ bind(&value_is_heap_object);
+ __ JumpIfSmi(cell_value_reg, &slow_case, Label::kNear);
+ Register cell_value_map_reg = cell_value_reg;
+ __ movp(cell_value_map_reg,
+ FieldOperand(cell_value_reg, HeapObject::kMapOffset));
+ __ cmpp(cell_value_map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
+ __ j(equal, &fast_heapobject_case);
+
+ // Fallback to the runtime.
+ __ bind(&slow_case);
+ __ Integer32ToSmi(slot_reg, slot_reg);
+ __ PopReturnAddressTo(kScratchRegister);
+ __ Push(slot_reg);
+ __ Push(value_reg);
+ __ Push(kScratchRegister);
+ __ TailCallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2, 1);
+}
+
+
static int Offset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
// Check that fits into int.
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 0fa7dc4848..52ee1444d3 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/codegen.h"
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 0a551eef5c..728d04048e 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -12,9 +12,6 @@ namespace v8 {
namespace internal {
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index 0224b23a82..f98862b001 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -8,8 +8,6 @@
#include "src/third_party/valgrind/valgrind.h"
#endif
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/assembler.h"
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
deleted file mode 100644
index 1749760d37..0000000000
--- a/deps/v8/src/x64/debug-x64.cc
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/assembler.h"
-#include "src/codegen.h"
-#include "src/debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void PatchCodeWithCall(Address pc, Address target, int guard_bytes) {
- int code_size = Assembler::kCallSequenceLength + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc, code_size);
-
-// Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->movp(kScratchRegister, reinterpret_cast<void*>(target),
- Assembler::RelocInfoNone());
- patcher.masm()->call(kScratchRegister);
-
- // Check that the size of the code generated is as expected.
- DCHECK_EQ(Assembler::kCallSequenceLength,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-
- CpuFeatures::FlushICache(pc, code_size);
-}
-
-
-// Patch the JS frame exit code with a debug break call. See
-// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x64.cc
-// for the precise return instructions sequence.
-void BreakLocation::SetDebugBreakAtReturn() {
- DCHECK(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength);
- PatchCodeWithCall(
- pc(), debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallSequenceLength);
-}
-
-
-void BreakLocation::SetDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- PatchCodeWithCall(
- pc(), debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry(),
- Assembler::kDebugBreakSlotLength - Assembler::kCallSequenceLength);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs,
- bool convert_call_to_jmp) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
- __ Push(Smi::FromInt(LiveEdit::kFramePaddingValue));
- }
- __ Push(Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as as two smis causing it to be untouched by GC.
- DCHECK((object_regs & ~kJSCallerSaved) == 0);
- DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
- DCHECK((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- DCHECK(!reg.is(kScratchRegister));
- if ((object_regs & (1 << r)) != 0) {
- __ Push(reg);
- }
- if ((non_object_regs & (1 << r)) != 0) {
- __ PushRegisterAsTwoSmis(reg);
- }
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ Set(rax, 0); // No arguments (argc == 0).
- __ Move(rbx, ExternalReference::debug_break(masm->isolate()));
-
- CEntryStub ceb(masm->isolate(), 1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Set(reg, kDebugZapValue);
- }
- if ((object_regs & (1 << r)) != 0) {
- __ Pop(reg);
- }
- // Reconstruct the 64-bit value from two smis.
- if ((non_object_regs & (1 << r)) != 0) {
- __ PopRegisterAsTwoSmis(reg);
- }
- }
-
- // Read current padding counter and skip corresponding number of words.
- __ Pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
-
- // Get rid of the internal frame.
- }
-
- // If this call did not replace a call but patched other code then there will
- // be an unwanted return address left on the stack. Here we get rid of that.
- if (convert_call_to_jmp) {
- __ addp(rsp, Immediate(kPCOnStackSize));
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ Move(kScratchRegister, after_break_target);
- __ Jump(Operand(kScratchRegister, 0));
-}
-
-
-void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallICStub
- // ----------- S t a t e -------------
- // -- rdx : type feedback slot (smi)
- // -- rdi : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rdx.bit() | rdi.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // Register state just before return from JS function (from codegen-x64.cc).
- // ----------- S t a t e -------------
- // -- rax: return value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit(), 0, true);
-}
-
-
-void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-x64.cc).
- // ----------- S t a t e -------------
- // -- rdi : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rdi.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-x64.cc).
- // rax is the actual number of arguments not encoded as a smi, see comment
- // above IC call.
- // ----------- S t a t e -------------
- // -- rax: number of arguments
- // -----------------------------------
- // The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rdi.bit(), rax.bit(), false);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
- MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-x64.cc).
- // rax is the actual number of arguments not encoded as a smi, see comment
- // above IC call.
- // ----------- S t a t e -------------
- // -- rax: number of arguments
- // -- rbx: feedback array
- // -- rdx: feedback slot (smi)
- // -----------------------------------
- // The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
- rax.bit(), false);
-}
-
-
-void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction.
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- __ Nop(Assembler::kDebugBreakSlotLength);
- DCHECK_EQ(Assembler::kDebugBreakSlotLength,
- masm->SizeOfCodeGeneratedSince(&check_codesize));
-}
-
-
-void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0, true);
-}
-
-
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ Move(rax, restarter_frame_function_slot);
- __ movp(Operand(rax, 0), Immediate(0));
-
- // We do not know our frame height, but set rsp based on rbp.
- __ leap(rsp, Operand(rbp, -1 * kPointerSize));
-
- __ Pop(rdi); // Function.
- __ popq(rbp);
-
- // Load context from the function.
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Get function code.
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ leap(rdx, FieldOperand(rdx, Code::kHeaderSize));
-
- // Re-run JSFunction, rdi is function, rsi is context.
- __ jmp(rdx);
-}
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index cd3324c7e4..72c92f0a39 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -2,13 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/safepoint-table.h"
namespace v8 {
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 9a651c5152..5534887f5a 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -6,8 +6,6 @@
#include <stdarg.h>
#include <stdio.h>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/base/lazy-instance.h"
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index 11db5b9ed6..fe99ee9046 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/assembler.h"
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 0719baffbf..1d9cf1ec13 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -71,12 +71,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
} } // namespace v8::internal
#endif // V8_X64_FRAMES_X64_H_
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 7602403508..a25d5f6f3c 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/interface-descriptors.h"
@@ -36,9 +34,11 @@ const Register VectorStoreICDescriptor::VectorRegister() { return rbx; }
const Register StoreTransitionDescriptor::MapRegister() { return rbx; }
-const Register ElementTransitionAndStoreDescriptor::MapRegister() {
- return rbx;
-}
+const Register LoadGlobalViaContextDescriptor::SlotRegister() { return rbx; }
+
+
+const Register StoreGlobalViaContextDescriptor::SlotRegister() { return rbx; }
+const Register StoreGlobalViaContextDescriptor::ValueRegister() { return rax; }
const Register InstanceofDescriptor::left() { return rax; }
@@ -64,6 +64,14 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
+void StoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rbx};
@@ -93,6 +101,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
}
+// static
+const Register ToObjectDescriptor::ReceiverRegister() { return rax; }
+
+
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rax};
@@ -160,12 +172,12 @@ void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
// rbx : feedback vector
- // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // rcx : original constructor (for IsSuperConstructorCall)
+ // rdx : slot in feedback vector (Smi, for RecordCallTarget)
// rdi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {rax, rdi, rbx};
+ Register registers[] = {rax, rdi, rcx, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -332,11 +344,22 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
+void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rdi, // math rounding function
+ rdx, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void MathRoundVariantCallFromOptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
rdi, // math rounding function
rdx, // vector slot id
+ rbx // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index c799da2b66..b936edc7de 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/base/bits.h"
@@ -122,7 +120,7 @@ bool LCodeGen::GeneratePrologue() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
@@ -458,6 +456,11 @@ bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
}
+bool LCodeGen::IsExternalConstant(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsExternal();
+}
+
+
bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
return op->IsConstantOperand() &&
chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
@@ -589,15 +592,23 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
if (op->IsStackSlot()) {
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
if (is_tagged) {
- translation->StoreStackSlot(op->index());
+ translation->StoreStackSlot(index);
} else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
+ translation->StoreUint32StackSlot(index);
} else {
- translation->StoreInt32StackSlot(op->index());
+ translation->StoreInt32StackSlot(index);
}
} else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
+ translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -2208,6 +2219,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(equal, instr->TrueLabel(chunk_));
}
+ if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ // SIMD value -> true.
+ __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
+ __ j(equal, instr->TrueLabel(chunk_));
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
@@ -2877,13 +2894,30 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ Move(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
- PREMONOMORPHIC).code();
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+ SLOPPY, PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(rsi));
+ DCHECK(ToRegister(instr->result()).is(rax));
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ Set(LoadGlobalViaContextDescriptor::SlotRegister(), slot);
+ Handle<Code> stub =
+ CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2996,7 +3030,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3086,40 +3120,31 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
elements_kind,
instr->base_offset()));
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
__ cvtss2sd(result, result);
- } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ } else if (elements_kind == FLOAT64_ELEMENTS) {
__ movsd(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
- case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
__ movsxbl(result, operand);
break;
- case EXTERNAL_UINT8_ELEMENTS:
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ movzxbl(result, operand);
break;
- case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
__ movsxwl(result, operand);
break;
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
__ movzxwl(result, operand);
break;
- case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
__ movl(result, operand);
break;
- case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
__ movl(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
@@ -3127,8 +3152,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
}
break;
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case FAST_ELEMENTS:
@@ -3247,7 +3270,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3477,10 +3500,9 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- __ Push(rsi); // The context is the first argument.
__ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}
@@ -4196,6 +4218,11 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ movl(operand, Immediate(value));
}
+ } else if (IsExternalConstant(operand_value)) {
+ DCHECK(!hinstr->NeedsWriteBarrier());
+ ExternalReference ptr = ToExternalReference(operand_value);
+ __ Move(kScratchRegister, ptr);
+ __ movp(operand, kScratchRegister);
} else {
Handle<Object> handle_value = ToHandle(operand_value);
DCHECK(!hinstr->NeedsWriteBarrier());
@@ -4236,6 +4263,29 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(rsi));
+ DCHECK(ToRegister(instr->value())
+ .is(StoreGlobalViaContextDescriptor::ValueRegister()));
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ Set(StoreGlobalViaContextDescriptor::SlotRegister(), slot);
+ Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
+ isolate(), depth, instr->language_mode())
+ .code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
+ __ CallRuntime(is_strict(instr->language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Representation representation = instr->hydrogen()->length()->representation();
DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
@@ -4319,39 +4369,28 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
elements_kind,
instr->base_offset()));
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
XMMRegister value(ToDoubleRegister(instr->value()));
__ cvtsd2ss(value, value);
__ movss(operand, value);
- } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ } else if (elements_kind == FLOAT64_ELEMENTS) {
__ movsd(operand, ToDoubleRegister(instr->value()));
} else {
Register value(ToRegister(instr->value()));
switch (elements_kind) {
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_INT8_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case INT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ movb(operand, value);
break;
- case EXTERNAL_INT16_ELEMENTS:
- case EXTERNAL_UINT16_ELEMENTS:
case INT16_ELEMENTS:
case UINT16_ELEMENTS:
__ movw(operand, value);
break;
- case EXTERNAL_INT32_ELEMENTS:
- case EXTERNAL_UINT32_ELEMENTS:
case INT32_ELEMENTS:
case UINT32_ELEMENTS:
__ movl(operand, value);
break;
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case FAST_ELEMENTS:
@@ -4475,7 +4514,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -5664,10 +5703,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
} else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label, false_distance);
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = zero;
+ final_branch_condition = below;
} else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label, false_distance);
@@ -5711,6 +5747,17 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(type_name, factory->type##_string())) { \
+ __ JumpIfSmi(input, false_label, false_distance); \
+ __ movp(input, FieldOperand(input, HeapObject::kMapOffset)); \
+ __ CompareRoot(input, Heap::k##Type##MapRootIndex); \
+ final_branch_condition = equal;
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+
} else {
__ jmp(false_label, false_distance);
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 8fc7cdce21..b08eff1952 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -62,6 +62,7 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
+ bool IsExternalConstant(LConstantOperand* op) const;
bool IsDehoistedKeyConstant(LConstantOperand* op) const;
bool IsSmiConstant(LConstantOperand* op) const;
int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index 79e7020816..800fb3f61c 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/x64/lithium-codegen-x64.h"
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.h b/deps/v8/src/x64/lithium-gap-resolver-x64.h
index da257e707c..7882da56e0 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.h
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.h
@@ -5,8 +5,6 @@
#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
-#include "src/v8.h"
-
#include "src/lithium.h"
namespace v8 {
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 3c150e21c9..965b63c4cf 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -4,8 +4,6 @@
#include <sstream>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/hydrogen-osr.h"
@@ -374,6 +372,11 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
}
+void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d", depth(), slot_index());
+}
+
+
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -392,6 +395,12 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
+ value()->PrintTo(stream);
+}
+
+
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -1605,8 +1614,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsExternal()) {
- DCHECK(instr->left()->representation().IsExternal());
- DCHECK(instr->right()->representation().IsInteger32());
+ DCHECK(instr->IsConsistentExternalRepresentation());
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
bool use_lea = LAddI::UseLea(instr);
LOperand* left = UseRegisterAtStart(instr->left());
@@ -2106,6 +2114,15 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
+ HLoadGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
+ DCHECK(instr->slot_index() > 0);
+ LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2219,7 +2236,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
FindDehoistedKeyDefinitions(instr->key());
}
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
@@ -2233,10 +2250,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
bool needs_environment;
- if (instr->is_external() || instr->is_fixed_typed_array()) {
+ if (instr->is_fixed_typed_array()) {
// see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
- elements_kind == UINT32_ELEMENTS) &&
+ needs_environment = elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32);
} else {
// see LCodeGen::DoLoadKeyedFixedDoubleArray and
@@ -2276,7 +2292,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
FindDehoistedKeyDefinitions(instr->key());
}
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
@@ -2310,14 +2326,9 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
!IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(elements_kind)));
- DCHECK((instr->is_fixed_typed_array() &&
- instr->elements()->representation().IsTagged()) ||
- (instr->is_external() &&
- instr->elements()->representation().IsExternal()));
- bool val_is_temp_register =
- elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS;
+ DCHECK(instr->elements()->representation().IsExternal());
+ bool val_is_temp_register = elements_kind == UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == FLOAT32_ELEMENTS;
LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
: UseRegister(instr->value());
LOperand* key = NULL;
@@ -2471,6 +2482,19 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
+ HStoreGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), rsi);
+ LOperand* value = UseFixed(instr->value(),
+ StoreGlobalViaContextDescriptor::ValueRegister());
+ DCHECK(instr->slot_index() > 0);
+
+ LStoreGlobalViaContext* result =
+ new (zone()) LStoreGlobalViaContext(context, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* left = UseFixed(instr->left(), rdx);
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 50e0595025..2ba248642c 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -102,6 +102,7 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
+ V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -141,6 +142,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
+ V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1615,12 +1617,8 @@ inline static bool ExternalArrayOpRequiresTemp(
// an index cannot fold the scale operation into a load and need an extra
// temp register to do the work.
return SmiValuesAre31Bits() && key_representation.IsSmi() &&
- (elements_kind == EXTERNAL_INT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
- elements_kind == UINT8_ELEMENTS ||
- elements_kind == INT8_ELEMENTS ||
- elements_kind == UINT8_CLAMPED_ELEMENTS);
+ (elements_kind == UINT8_ELEMENTS || elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS);
}
@@ -1634,15 +1632,9 @@ class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
- bool is_external() const {
- return hydrogen()->is_external();
- }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
void PrintDataTo(StringStream* stream) override;
@@ -1690,7 +1682,23 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
LOperand* temp_vector() { return temps_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
+ TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ LOperand* context() { return inputs_[0]; }
+
+ int depth() const { return hydrogen()->depth(); }
+ int slot_index() const { return hydrogen()->slot_index(); }
};
@@ -2185,6 +2193,28 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
+class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreGlobalViaContext(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
+ "store-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int depth() { return hydrogen()->depth(); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
@@ -2193,13 +2223,9 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
inputs_[2] = value;
}
- bool is_external() const { return hydrogen()->is_external(); }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 092b5bc83d..4e651274ba 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X64
#include "src/base/bits.h"
@@ -11,7 +9,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/heap/heap.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -1380,10 +1378,8 @@ void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
}
-void MacroAssembler::SmiAddConstant(Register dst,
- Register src,
- Smi* constant,
- SmiOperationExecutionMode mode,
+void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant,
+ SmiOperationConstraints constraints,
Label* bailout_label,
Label::Distance near_jump) {
if (constant->value() == 0) {
@@ -1394,12 +1390,12 @@ void MacroAssembler::SmiAddConstant(Register dst,
DCHECK(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
addp(dst, kScratchRegister);
- if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
+ if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
j(no_overflow, bailout_label, near_jump);
- DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
subp(dst, kScratchRegister);
- } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
- if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
+ } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
+ if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
Label done;
j(no_overflow, &done, Label::kNear);
subp(dst, kScratchRegister);
@@ -1410,11 +1406,11 @@ void MacroAssembler::SmiAddConstant(Register dst,
j(overflow, bailout_label, near_jump);
}
} else {
- CHECK(mode.IsEmpty());
+ UNREACHABLE();
}
} else {
- DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
- DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
+ DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
+ DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
LoadSmiConstant(dst, constant);
addp(dst, src);
j(overflow, bailout_label, near_jump);
@@ -1446,10 +1442,8 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
}
-void MacroAssembler::SmiSubConstant(Register dst,
- Register src,
- Smi* constant,
- SmiOperationExecutionMode mode,
+void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant,
+ SmiOperationConstraints constraints,
Label* bailout_label,
Label::Distance near_jump) {
if (constant->value() == 0) {
@@ -1460,12 +1454,12 @@ void MacroAssembler::SmiSubConstant(Register dst,
DCHECK(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
subp(dst, kScratchRegister);
- if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
+ if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
j(no_overflow, bailout_label, near_jump);
- DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
addp(dst, kScratchRegister);
- } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
- if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
+ } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
+ if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
Label done;
j(no_overflow, &done, Label::kNear);
addp(dst, kScratchRegister);
@@ -1476,11 +1470,11 @@ void MacroAssembler::SmiSubConstant(Register dst,
j(overflow, bailout_label, near_jump);
}
} else {
- CHECK(mode.IsEmpty());
+ UNREACHABLE();
}
} else {
- DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
- DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
+ DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
+ DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
if (constant->value() == Smi::kMinValue) {
DCHECK(!dst.is(kScratchRegister));
movp(dst, src);
@@ -3559,10 +3553,11 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
void MacroAssembler::DebugBreak() {
Set(rax, 0); // No arguments.
- LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
+ LoadAddress(rbx,
+ ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
CEntryStub ces(isolate(), 1);
DCHECK(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
@@ -4308,21 +4303,6 @@ void MacroAssembler::Allocate(Register object_size,
}
-void MacroAssembler::UndoAllocationInNewSpace(Register object) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- andp(object, Immediate(~kHeapObjectTagMask));
- Operand top_operand = ExternalOperand(new_space_allocation_top);
-#ifdef DEBUG
- cmpp(object, top_operand);
- Check(below, kUndoAllocationOfNonAllocatedMemory);
-#endif
- movp(top_operand, object);
-}
-
-
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch,
Label* gc_required,
@@ -5063,13 +5043,21 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
DCHECK(!scratch1.is(scratch0));
Register current = scratch0;
- Label loop_again;
+ Label loop_again, end;
movp(current, object);
+ movp(current, FieldOperand(current, HeapObject::kMapOffset));
+ movp(current, FieldOperand(current, Map::kPrototypeOffset));
+ CompareRoot(current, Heap::kNullValueRootIndex);
+ j(equal, &end);
// Loop based on the map going up the prototype chain.
bind(&loop_again);
movp(current, FieldOperand(current, HeapObject::kMapOffset));
+ STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ CmpInstanceType(current, JS_OBJECT_TYPE);
+ j(below, found);
movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
@@ -5077,6 +5065,8 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
movp(current, FieldOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
j(not_equal, &loop_again);
+
+ bind(&end);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 0016d99321..7852d39c03 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -7,12 +7,27 @@
#include "src/assembler.h"
#include "src/bailout-reason.h"
+#include "src/base/flags.h"
#include "src/frames.h"
#include "src/globals.h"
+#include "src/x64/frames-x64.h"
namespace v8 {
namespace internal {
+// Give alias names to registers for calling conventions.
+const Register kReturnRegister0 = {kRegister_rax_Code};
+const Register kReturnRegister1 = {kRegister_rdx_Code};
+const Register kJSFunctionRegister = {kRegister_rdi_Code};
+const Register kContextRegister = {kRegister_rsi_Code};
+const Register kInterpreterAccumulatorRegister = {kRegister_rax_Code};
+const Register kInterpreterRegisterFileRegister = {kRegister_r11_Code};
+const Register kInterpreterBytecodeOffsetRegister = {kRegister_r12_Code};
+const Register kInterpreterBytecodeArrayRegister = {kRegister_r14_Code};
+const Register kInterpreterDispatchTableRegister = {kRegister_r15_Code};
+const Register kRuntimeCallFunctionRegister = {kRegister_rbx_Code};
+const Register kRuntimeCallArgCountRegister = {kRegister_rax_Code};
+
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
@@ -32,21 +47,15 @@ enum PointersToHereCheck {
kPointersToHereAreAlwaysInteresting
};
-enum SmiOperationConstraint {
- PRESERVE_SOURCE_REGISTER,
- BAILOUT_ON_NO_OVERFLOW,
- BAILOUT_ON_OVERFLOW,
- NUMBER_OF_CONSTRAINTS
+enum class SmiOperationConstraint {
+ kPreserveSourceRegister = 1 << 0,
+ kBailoutOnNoOverflow = 1 << 1,
+ kBailoutOnOverflow = 1 << 2
};
-STATIC_ASSERT(NUMBER_OF_CONSTRAINTS <= 8);
+typedef base::Flags<SmiOperationConstraint> SmiOperationConstraints;
-class SmiOperationExecutionMode : public EnumSet<SmiOperationConstraint, byte> {
- public:
- SmiOperationExecutionMode() : EnumSet<SmiOperationConstraint, byte>(0) { }
- explicit SmiOperationExecutionMode(byte bits)
- : EnumSet<SmiOperationConstraint, byte>(bits) { }
-};
+DEFINE_OPERATORS_FOR_FLAGS(SmiOperationConstraints)
#ifdef DEBUG
bool AreAliased(Register reg1,
@@ -546,11 +555,8 @@ class MacroAssembler: public Assembler {
// Add an integer constant to a tagged smi, giving a tagged smi as result,
// or jumping to a label if the result cannot be represented by a smi.
- void SmiAddConstant(Register dst,
- Register src,
- Smi* constant,
- SmiOperationExecutionMode mode,
- Label* bailout_label,
+ void SmiAddConstant(Register dst, Register src, Smi* constant,
+ SmiOperationConstraints constraints, Label* bailout_label,
Label::Distance near_jump = Label::kFar);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
@@ -560,11 +566,8 @@ class MacroAssembler: public Assembler {
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result, or jumping to a label if the result cannot be represented by a smi.
- void SmiSubConstant(Register dst,
- Register src,
- Smi* constant,
- SmiOperationExecutionMode mode,
- Label* bailout_label,
+ void SmiSubConstant(Register dst, Register src, Smi* constant,
+ SmiOperationConstraints constraints, Label* bailout_label,
Label::Distance near_jump = Label::kFar);
// Negating a smi can give a negative zero or too large positive value.
@@ -1173,12 +1176,6 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. Make sure that no pointers are left to the
- // object(s) no longer allocated as they would be invalid when allocation is
- // un-done.
- void UndoAllocationInNewSpace(Register object);
-
// Allocate a heap number in new space with undefined value. Returns
// tagged pointer in result register, or jumps to gc_required if new
// space is full.
@@ -1550,7 +1547,7 @@ class MacroAssembler: public Assembler {
class CodePatcher {
public:
CodePatcher(byte* address, int size);
- virtual ~CodePatcher();
+ ~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
@@ -1585,6 +1582,11 @@ inline Operand ContextOperand(Register context, int index) {
}
+inline Operand ContextOperand(Register context, Register index) {
+ return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
+}
+
+
inline Operand GlobalObjectOperand() {
return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX);
}
diff --git a/deps/v8/src/x87/assembler-x87-inl.h b/deps/v8/src/x87/assembler-x87-inl.h
index 62beab8ed3..0e3de87e96 100644
--- a/deps/v8/src/x87/assembler-x87-inl.h
+++ b/deps/v8/src/x87/assembler-x87-inl.h
@@ -40,7 +40,7 @@
#include "src/x87/assembler-x87.h"
#include "src/assembler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
namespace v8 {
namespace internal {
@@ -53,35 +53,25 @@ static const int kNoCodeAgeSequenceLength = 5;
// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
- bool flush_icache = icache_flush_mode != SKIP_ICACHE_FLUSH;
+void RelocInfo::apply(intptr_t delta) {
if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
- if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
- } else if (rmode_ == CODE_AGE_SEQUENCE) {
+ } else if (IsCodeAgeSequence(rmode_)) {
if (*pc_ == kCallOpcode) {
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
- if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
}
- } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
- // Special handling of js_return when a break point is set (call
- // instruction has been inserted).
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= delta; // Relocate entry.
- if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
- } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
+ } else if (IsDebugBreakSlot(rmode_) && IsPatchedDebugBreakSlotSequence()) {
// Special handling of a debug break slot when a break point is set (call
// instruction has been inserted).
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ int32_t* p = reinterpret_cast<int32_t*>(
+ pc_ + Assembler::kPatchDebugBreakSlotAddressOffset);
*p -= delta; // Relocate entry.
- if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
} else if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p += delta; // Relocate entry.
- if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
}
}
@@ -245,17 +235,17 @@ void RelocInfo::set_code_age_stub(Code* stub,
}
-Address RelocInfo::call_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Assembler::target_address_at(pc_ + 1, host_);
+Address RelocInfo::debug_call_address() {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
+ return Assembler::target_address_at(location, host_);
}
-void RelocInfo::set_call_address(Address target) {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Assembler::set_target_address_at(pc_ + 1, host_, target);
+void RelocInfo::set_debug_call_address(Address target) {
+ DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
+ Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
+ Assembler::set_target_address_at(location, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -264,23 +254,6 @@ void RelocInfo::set_call_address(Address target) {
}
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 1);
-}
-
-
void RelocInfo::WipeOut() {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
@@ -319,11 +292,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- isolate->debug()->has_break_points()) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
@@ -347,11 +317,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
+ } else if (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
@@ -503,11 +470,6 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
-Address Assembler::break_address_from_return_address(Address pc) {
- return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
-}
-
-
Displacement Assembler::disp_at(Label* L) {
return Displacement(long_at(L->pos()));
}
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
index b7ba0cdf9b..1770477ae7 100644
--- a/deps/v8/src/x87/assembler-x87.cc
+++ b/deps/v8/src/x87/assembler-x87.cc
@@ -34,14 +34,13 @@
// significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -82,9 +81,9 @@ void Displacement::init(Label* L, Type type) {
const int RelocInfo::kApplyMask =
- RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
- 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE;
+ RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
+ 1 << RelocInfo::INTERNAL_REFERENCE | 1 << RelocInfo::CODE_AGE_SEQUENCE |
+ RelocInfo::kDebugBreakSlotMask;
bool RelocInfo::IsCodedSpecially() {
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index d8c86abf7a..6d631785d6 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -524,9 +524,6 @@ class Assembler : public AssemblerBase {
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
- // Return the code target address of the patch debug break slot
- inline static Address break_address_from_return_address(Address pc);
-
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -544,21 +541,16 @@ class Assembler : public AssemblerBase {
// Distance between the address of the code target in the call instruction
// and the return address
static const int kCallTargetAddressOffset = kPointerSize;
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32.
-
- // Distance between start of patched debug break slot and the emitted address
- // to jump to.
- static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
static const int kCallInstructionLength = 5;
- static const int kPatchDebugBreakSlotReturnOffset = kPointerSize;
- static const int kJSReturnSequenceLength = 6;
// The debug break slot must be able to contain a call instruction.
static const int kDebugBreakSlotLength = kCallInstructionLength;
+ // Distance between start of patched debug break slot and the emitted address
+ // to jump to.
+ static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
+
// One byte opcode for test al, 0xXX.
static const byte kTestAlByte = 0xA8;
// One byte opcode for nop.
@@ -951,11 +943,11 @@ class Assembler : public AssemblerBase {
return pc_offset() - label->pos();
}
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
+ // Mark generator continuation.
+ void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot();
+ void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
diff --git a/deps/v8/src/x87/builtins-x87.cc b/deps/v8/src/x87/builtins-x87.cc
index 55e648cab1..9acafd2ff8 100644
--- a/deps/v8/src/x87/builtins-x87.cc
+++ b/deps/v8/src/x87/builtins-x87.cc
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/code-factory.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/x87/frames-x87.h"
namespace v8 {
namespace internal {
@@ -100,45 +99,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
-static void Generate_Runtime_NewObject(MacroAssembler* masm,
- bool create_memento,
- Register original_constructor,
- Label* count_incremented,
- Label* allocated) {
- int offset = 0;
- if (create_memento) {
- // Get the cell or allocation site.
- __ mov(edi, Operand(esp, kPointerSize * 2));
- __ push(edi);
- offset = kPointerSize;
- }
-
- // Must restore esi (context) and edi (constructor) before calling
- // runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(edi, Operand(esp, offset));
- __ push(edi);
- __ push(original_constructor);
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ CallRuntime(Runtime::kNewObject, 2);
- }
- __ mov(ebx, eax); // store result in ebx
-
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- if (create_memento) {
- __ jmp(count_incremented);
- } else {
- __ jmp(allocated);
- }
-}
-
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- eax: number of arguments
@@ -154,40 +116,27 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
- if (create_memento) {
- __ AssertUndefinedOrAllocationSite(ebx);
- __ push(ebx);
- }
-
// Preserve the incoming parameters on the stack.
+ __ AssertUndefinedOrAllocationSite(ebx);
+ __ push(ebx);
__ SmiTag(eax);
__ push(eax);
__ push(edi);
- if (use_new_target) {
- __ push(edx);
- }
-
- __ cmp(edx, edi);
- Label normal_new;
- Label count_incremented;
- Label allocated;
- __ j(equal, &normal_new);
-
- // Original constructor and function are different.
- Generate_Runtime_NewObject(masm, create_memento, edx, &count_incremented,
- &allocated);
- __ bind(&normal_new);
+ __ push(edx);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
- Label rt_call;
+ Label rt_call, allocated;
if (FLAG_inline_new) {
- Label undo_allocation;
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
__ j(not_equal, &rt_call);
+ // Fall back to runtime if the original constructor and function differ.
+ __ cmp(edx, edi);
+ __ j(not_equal, &rt_call);
+
// Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
// edi: constructor
@@ -224,12 +173,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ j(not_equal, &allocate);
__ push(eax);
+ __ push(edx);
__ push(edi);
__ push(edi); // constructor
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ pop(edi);
+ __ pop(edx);
__ pop(eax);
__ mov(esi, Map::kSlackTrackingCounterEnd - 1);
@@ -272,8 +223,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ j(less, &no_inobject_slack_tracking);
// Allocate object with a slack.
- __ movzx_b(esi,
- FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+ __ movzx_b(
+ esi,
+ FieldOperand(
+ eax, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
+ __ movzx_b(eax, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+ __ sub(esi, eax);
__ lea(esi,
Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
// esi: offset of first field after pre-allocated fields
@@ -298,7 +253,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ mov(Operand(esi, AllocationMemento::kMapOffset),
factory->allocation_memento_map());
// Get the cell or undefined.
- __ mov(edx, Operand(esp, kPointerSize*2));
+ __ mov(edx, Operand(esp, 3 * kPointerSize));
+ __ AssertUndefinedOrAllocationSite(edx);
__ mov(Operand(esi, AllocationMemento::kAllocationSiteOffset),
edx);
} else {
@@ -306,95 +262,52 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
+ // and jump into the continuation code at any time from now on.
+ // ebx: JSObject (untagged)
__ or_(ebx, Immediate(kHeapObjectTag));
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- // Calculate the total number of properties described by the map.
- __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
- __ movzx_b(ecx,
- FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
- __ add(edx, ecx);
- // Calculate unused properties past the end of the in-object properties.
- __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
- __ sub(edx, ecx);
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, kPropertyAllocationCountFailed);
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // ebx: JSObject
- // edi: start of next object (will be start of FixedArray)
- // edx: number of elements in properties array
- __ Allocate(FixedArray::kHeaderSize,
- times_pointer_size,
- edx,
- REGISTER_VALUE_IS_INT32,
- edi,
- ecx,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // ebx: JSObject
- // edi: FixedArray
- // edx: number of elements
- // ecx: start of next object
- __ mov(eax, factory->fixed_array_map());
- __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
- __ SmiTag(edx);
- __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
-
- // Initialize the fields to undefined.
- // ebx: JSObject
- // edi: FixedArray
- // ecx: start of next object
- __ mov(edx, factory->undefined_value());
- __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
- __ InitializeFieldsWithFiller(eax, ecx, edx);
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // ebx: JSObject
- // edi: FixedArray
- __ or_(edi, Immediate(kHeapObjectTag)); // add the heap tag
- __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
-
-
// Continue with JSObject being successfully allocated
- // ebx: JSObject
+ // ebx: JSObject (tagged)
__ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // ebx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(ebx);
}
// Allocate the new receiver object using the runtime call.
+ // edx: original constructor
__ bind(&rt_call);
- Generate_Runtime_NewObject(masm, create_memento, edi, &count_incremented,
- &allocated);
+ int offset = kPointerSize;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ mov(edi, Operand(esp, kPointerSize * 3));
+ __ push(edi); // argument 1: allocation site
+ offset += kPointerSize;
+ }
+
+ // Must restore esi (context) and edi (constructor) before calling
+ // runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(edi, Operand(esp, offset));
+ __ push(edi); // argument 2/1: constructor function
+ __ push(edx); // argument 3/2: original constructor
+ if (create_memento) {
+ __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
+ } else {
+ __ CallRuntime(Runtime::kNewObject, 2);
+ }
+ __ mov(ebx, eax); // store result in ebx
+
+ // Runtime_NewObjectWithAllocationSite increments allocation count.
+ // Skip the increment.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// New object allocated.
// ebx: newly allocated object
__ bind(&allocated);
if (create_memento) {
- int offset = (use_new_target ? 3 : 2) * kPointerSize;
- __ mov(ecx, Operand(esp, offset));
+ __ mov(ecx, Operand(esp, 3 * kPointerSize));
__ cmp(ecx, masm->isolate()->factory()->undefined_value());
__ j(equal, &count_incremented);
// ecx is an AllocationSite. We are creating a memento from it, so we
@@ -405,9 +318,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore the parameters.
- if (use_new_target) {
- __ pop(edx); // new.target
- }
+ __ pop(edx); // new.target
__ pop(edi); // Constructor function.
// Retrieve smi-tagged arguments count from the stack.
@@ -416,9 +327,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Push new.target onto the construct frame. This is stored just below the
// receiver on the stack.
- if (use_new_target) {
- __ push(edx);
- }
+ __ push(edx);
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
@@ -452,9 +361,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
- // TODO(arv): Remove the "!use_new_target" before supporting optimization
- // of functions that reference new.target
- if (!is_api_function && !use_new_target) {
+ if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@@ -482,8 +389,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Restore the arguments count and leave the construct frame. The arguments
  // count is stored below the receiver and the new.target.
__ bind(&exit);
- int offset = (use_new_target ? 2 : 1) * kPointerSize;
- __ mov(ebx, Operand(esp, offset));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
// Leave construct frame.
}
@@ -499,17 +405,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, true, false);
}
@@ -521,12 +422,13 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// -- edx: original constructor
// -----------------------------------
- // TODO(dslomov): support pretenuring
- CHECK(!FLAG_pretenuring_call_new);
-
{
FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
+ // Preserve allocation site.
+ __ AssertUndefinedOrAllocationSite(ebx);
+ __ push(ebx);
+
// Preserve actual arguments count.
__ SmiTag(eax);
__ push(eax);
@@ -705,6 +607,156 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o edi: the JS function object being called
+// o esi: our context
+// o ebp: the caller's frame pointer
+// o esp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-ia32.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS function.
+
+ // Get the bytecode array from the function object and load the pointer to the
+ // first entry into edi (InterpreterBytecodeRegister).
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(kInterpreterBytecodeArrayRegister,
+ FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+ __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+ eax);
+ __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ mov(ecx, esp);
+ __ sub(ecx, ebx);
+ ExternalReference stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ cmp(ecx, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ Label loop_header;
+ Label loop_check;
+ __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
+ __ jmp(&loop_check);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ push(eax);
+ // Continue loop if not done.
+ __ bind(&loop_check);
+ __ sub(ebx, Immediate(kPointerSize));
+ __ j(greater_equal, &loop_header);
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ bind(&ok);
+ }
+
+ // Load accumulator, register file, bytecode offset, dispatch table into
+ // registers.
+ __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
+ __ mov(kInterpreterRegisterFileRegister, ebp);
+ __ sub(
+ kInterpreterRegisterFileRegister,
+ Immediate(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ // Since the dispatch table root might be set after builtins are generated,
+ // load directly from the roots table.
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ add(kInterpreterDispatchTableRegister,
+ Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // TODO(rmcilroy) Push our context as a stack located parameter of the
+ // bytecode handler.
+
+ // Dispatch to the first bytecode handler for the function.
+ __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ mov(esi, Operand(kInterpreterDispatchTableRegister, esi,
+ times_pointer_size, 0));
+  // TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
+ // and header removal.
+ __ add(esi, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(esi);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+  // The return value is in accumulator, which is already in eax.
+
+ // Leave the frame (also dropping the register file).
+ __ leave();
+  // Return dropping receiver + arguments.
+ // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Ret(1 * kPointerSize, ecx);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@@ -961,8 +1013,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ SmiTag(eax);
__ push(eax);
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(eax, ebx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(ebx, eax);
__ Move(edx, Immediate(0)); // restore
@@ -1056,6 +1109,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int vectorOffset,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
@@ -1071,12 +1125,9 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ mov(receiver, Operand(ebp, argumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
- FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
- Handle<TypeFeedbackVector> feedback_vector =
- masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
- int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
- __ mov(slot, Immediate(Smi::FromInt(index)));
- __ mov(vector, Immediate(feedback_vector));
+ int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
+ __ mov(slot, Immediate(Smi::FromInt(slot_index)));
+ __ mov(vector, Operand(ebp, vectorOffset));
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ call(ic, RelocInfo::CODE_TARGET);
@@ -1124,6 +1175,13 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
static const int kFunctionOffset = kReceiverOffset + kPointerSize;
+ static const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ push(edi);
__ push(Operand(ebp, kFunctionOffset)); // push this
__ push(Operand(ebp, kArgumentsOffset)); // push arguments
@@ -1136,8 +1194,7 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
Generate_CheckStackOverflow(masm, kFunctionOffset, kEaxIsSmiTagged);
// Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
__ push(eax); // limit
__ push(Immediate(0)); // index
@@ -1182,8 +1239,9 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ j(above_equal, &push_receiver);
__ bind(&call_to_object);
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(eax, ebx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(ebx, eax);
__ jmp(&push_receiver);
@@ -1197,8 +1255,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ push(ebx);
// Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
@@ -1247,6 +1305,13 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
static const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
static const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
static const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+ static const int kVectorOffset =
+ InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
+
+ // Push the vector.
+ __ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ push(edi);
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
@@ -1266,29 +1331,26 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
Generate_CheckStackOverflow(masm, kFunctionOffset, kEaxIsSmiTagged);
// Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
__ Push(eax); // limit
__ push(Immediate(0)); // index
- // Push newTarget and callee functions
- __ push(Operand(ebp, kNewTargetOffset));
+ // Push the constructor function as callee.
__ push(Operand(ebp, kFunctionOffset));
// Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(
- masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+ Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
+ kIndexOffset, kLimitOffset);
// Use undefined feedback vector
__ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
__ mov(edi, Operand(ebp, kFunctionOffset));
+ __ mov(ecx, Operand(ebp, kNewTargetOffset));
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
-
// Leave internal frame.
}
// remove this, target, arguments, and newTarget
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index 875b798bda..bba43276fa 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/base/bits.h"
@@ -14,9 +12,10 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
-#include "src/jsregexp.h"
-#include "src/regexp-macro-assembler.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
+#include "src/x87/frames-x87.h"
namespace v8 {
namespace internal {
@@ -37,7 +36,7 @@ static void InitializeArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -56,7 +55,7 @@ static void InitializeInternalArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+ JS_FUNCTION_STUB_MODE);
}
}
@@ -325,8 +324,28 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
- // No SSE2 support
- UNREACHABLE();
+ const Register base = edx;
+ const Register scratch = ecx;
+ Counters* counters = isolate()->counters();
+ Label call_runtime;
+
+ // We will call runtime helper function directly.
+ if (exponent_type() == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+
+ // The stub is called from non-optimized code, which expects the result
+ // as heap number in exponent.
+ __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ IncrementCounter(counters->math_pow(), 1);
+ __ ret(2 * kPointerSize);
+ } else {
+ // Currently it's only called from full-compiler and exponent type is
+ // ON_STACK.
+ UNIMPLEMENTED();
+ }
}
@@ -368,9 +387,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ push(scratch); // return address
// Perform tail call to the entry.
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadElementWithInterceptor), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -470,7 +487,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ pop(ebx); // Return address.
__ push(edx);
__ push(ebx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments, 1, 1);
}
@@ -1379,6 +1396,9 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
__ j(equal, &runtime_call, Label::kFar);
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ cmpb(ecx, static_cast<uint8_t>(SIMD128_VALUE_TYPE));
+ __ j(equal, &runtime_call, Label::kFar);
if (is_strong(strength())) {
// We have already tested for smis and heap numbers, so if both
// arguments are not strings we must proceed to the slow case.
@@ -1579,59 +1599,83 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ push(eax);
// Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc == equal) {
- builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == equal && strict()) {
+ __ push(ecx);
+ __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
} else {
- builtin =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- }
+ Builtins::JavaScript builtin;
+ if (cc == equal) {
+ builtin = Builtins::EQUALS;
+ } else {
+ builtin =
+ is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ }
- // Restore return address on the stack.
- __ push(ecx);
+ // Restore return address on the stack.
+ __ push(ecx);
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+ }
__ bind(&miss);
GenerateMiss(masm);
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
+ bool is_super) {
// eax : number of arguments to the construct function
- // ebx : Feedback vector
+ // ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // esp[0]: original receiver (for IsSuperConstructorCall)
+ if (is_super) {
+ __ pop(ecx);
+ }
- // Number-of-arguments register must be smi-tagged to call out.
- __ SmiTag(eax);
- __ push(eax);
- __ push(edi);
- __ push(edx);
- __ push(ebx);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Number-of-arguments register must be smi-tagged to call out.
+ __ SmiTag(eax);
+ __ push(eax);
+ __ push(edi);
+ __ push(edx);
+ __ push(ebx);
+ if (is_super) {
+ __ push(ecx);
+ }
- __ CallStub(stub);
+ __ CallStub(stub);
- __ pop(ebx);
- __ pop(edx);
- __ pop(edi);
- __ pop(eax);
- __ SmiUntag(eax);
+ if (is_super) {
+ __ pop(ecx);
+ }
+ __ pop(ebx);
+ __ pop(edx);
+ __ pop(edi);
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
+
+ if (is_super) {
+ __ push(ecx);
+ }
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// eax : number of arguments to the construct function
- // ebx : Feedback vector
+ // ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
+ // esp[0]: original receiver (for IsSuperConstructorCall)
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
@@ -1700,14 +1744,14 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ jmp(&done);
__ bind(&not_array_function);
}
CreateWeakCellStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub);
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ bind(&done);
}
@@ -1760,8 +1804,8 @@ static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{ FrameScope frame_scope(masm, StackFrame::INTERNAL);
__ push(edi);
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ pop(edi);
}
__ mov(Operand(esp, (argc + 1) * kPointerSize), eax);
@@ -1831,11 +1875,15 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
// ebx : feedback vector
- // edx : (only if ebx is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // ecx : original constructor (for IsSuperConstructorCall)
+ // edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
Label slow, non_function_call;
+ if (IsSuperConstructorCall()) {
+ __ push(ecx);
+ }
+
// Check that function is not a smi.
__ JumpIfSmi(edi, &non_function_call);
// Check that function is a JSFunction.
@@ -1843,7 +1891,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ GenerateRecordCallTarget(masm, IsSuperConstructorCall());
if (FLAG_pretenuring_call_new) {
// Put the AllocationSite from the feedback vector into ebx.
@@ -1868,7 +1916,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
if (IsSuperConstructorCall()) {
- __ mov(edx, Operand(esp, eax, times_pointer_size, 2 * kPointerSize));
+ __ pop(edx);
} else {
// Pass original constructor to construct stub.
__ mov(edx, edi);
@@ -1885,6 +1933,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// edi: called object
// eax: number of arguments
// ecx: object map
+ // esp[0]: original receiver (for IsSuperConstructorCall)
Label do_call;
__ bind(&slow);
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
@@ -1895,6 +1944,9 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ bind(&non_function_call);
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
+ if (IsSuperConstructorCall()) {
+ __ Drop(1);
+ }
// Set expected number of arguments to zero (not changing eax).
__ Move(ebx, Immediate(0));
Handle<Code> arguments_adaptor =
@@ -2122,11 +2174,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ push(edx);
// Call the entry.
- IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
- : IC::kCallIC_Customization_Miss;
-
- ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
- __ CallExternalReference(miss, 3);
+ Runtime::FunctionId id = GetICState() == DEFAULT
+ ? Runtime::kCallIC_Miss
+ : Runtime::kCallIC_Customization_Miss;
+ __ CallRuntime(id, 3);
// Move result to edi and exit the internal frame.
__ mov(edi, eax);
@@ -2716,10 +2767,9 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
- __ test(code_,
- Immediate(kSmiTagMask |
- ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
+ DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
+ __ test(code_, Immediate(kSmiTagMask |
+ ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
__ j(not_zero, &slow_case_);
Factory* factory = masm->isolate()->factory();
@@ -2995,7 +3045,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
__ bind(&single_char);
// eax: string
@@ -3218,7 +3268,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -3502,7 +3552,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ bind(&miss);
@@ -3557,15 +3607,13 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
- isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(edx); // Preserve edx and eax.
__ push(eax);
__ push(edx); // And also use them as the arguments.
__ push(eax);
__ push(Immediate(Smi::FromInt(op())));
- __ CallExternalReference(miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss, 3);
// Compute the entry point of the rewritten stub.
__ lea(edi, FieldOperand(eax, Code::kHeaderSize));
__ pop(eax);
@@ -3606,11 +3654,11 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
NameDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
Register entity_name = r0;
// Having undefined at this place means the name is not contained.
- DCHECK_EQ(kSmiTagSize, 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
__ mov(entity_name, Operand(properties, index, times_half_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ cmp(entity_name, masm->isolate()->factory()->undefined_value());
@@ -3678,7 +3726,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ and_(r0, r1);
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
// Check if the key is identical to the name.
@@ -3741,11 +3789,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ and_(scratch, Operand(esp, 0));
// Scale the index by multiplying by the entry size.
- DCHECK(NameDictionary::kEntrySize == 3);
+ STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ lea(index(), Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
- DCHECK_EQ(kSmiTagSize, 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
__ mov(scratch, Operand(dictionary(), index(), times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ cmp(scratch, isolate()->factory()->undefined_value());
@@ -4241,8 +4289,8 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ push(vector);
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::LOAD_IC, code_flags, false, receiver, name, vector, scratch);
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
+ receiver, name, vector, scratch);
__ pop(vector);
__ pop(slot);
@@ -4454,12 +4502,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// esp[4] - last argument
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
- DCHECK(FAST_SMI_ELEMENTS == 0);
- DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
- DCHECK(FAST_ELEMENTS == 2);
- DCHECK(FAST_HOLEY_ELEMENTS == 3);
- DCHECK(FAST_DOUBLE_ELEMENTS == 4);
- DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
__ test_b(edx, 1);
@@ -4752,6 +4800,161 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context_reg = esi;
+ Register slot_reg = ebx;
+ Register result_reg = eax;
+ Label slow_case;
+
+ // Go up context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ mov(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ context_reg = result_reg;
+ }
+
+ // Load the PropertyCell value at the specified slot.
+ __ mov(result_reg, ContextOperand(context_reg, slot_reg));
+ __ mov(result_reg, FieldOperand(result_reg, PropertyCell::kValueOffset));
+
+ // Check that value is not the_hole.
+ __ CompareRoot(result_reg, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &slow_case, Label::kNear);
+ __ Ret();
+
+ // Fallback to the runtime.
+ __ bind(&slow_case);
+ __ SmiTag(slot_reg);
+ __ Pop(result_reg); // Pop return address.
+ __ Push(slot_reg);
+ __ Push(result_reg); // Push return address.
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+}
+
+
+void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
+ Register context_reg = esi;
+ Register slot_reg = ebx;
+ Register value_reg = eax;
+ Register cell_reg = edi;
+ Register cell_details_reg = edx;
+ Register cell_value_reg = ecx;
+ Label fast_heapobject_case, fast_smi_case, slow_case;
+
+ if (FLAG_debug_code) {
+ __ CompareRoot(value_reg, Heap::kTheHoleValueRootIndex);
+ __ Check(not_equal, kUnexpectedValue);
+ }
+
+ // Go up context chain to the script context.
+ for (int i = 0; i < depth(); ++i) {
+ __ mov(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ context_reg = cell_reg;
+ }
+
+ // Load the PropertyCell at the specified slot.
+ __ mov(cell_reg, ContextOperand(context_reg, slot_reg));
+
+ // Load PropertyDetails for the cell (actually only the cell_type and kind).
+ __ mov(cell_details_reg,
+ FieldOperand(cell_reg, PropertyCell::kDetailsOffset));
+ __ SmiUntag(cell_details_reg);
+ __ and_(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::kMask |
+ PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask));
+
+ // Check if PropertyCell holds mutable data.
+ Label not_mutable_data;
+ __ cmp(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kMutable) |
+ PropertyDetails::KindField::encode(kData)));
+ __ j(not_equal, &not_mutable_data);
+ __ JumpIfSmi(value_reg, &fast_smi_case);
+ __ bind(&fast_heapobject_case);
+ __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
+ __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
+ cell_details_reg, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ // RecordWriteField clobbers the value register, so we need to reload.
+ __ mov(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
+ __ Ret();
+ __ bind(&not_mutable_data);
+
+ // Check if PropertyCell value matches the new value (relevant for Constant,
+ // ConstantType and Undefined cells).
+ Label not_same_value;
+ __ mov(cell_value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
+ __ cmp(cell_value_reg, value_reg);
+ __ j(not_equal, &not_same_value,
+ FLAG_debug_code ? Label::kFar : Label::kNear);
+ // Make sure the PropertyCell is not marked READ_ONLY.
+ __ test(cell_details_reg,
+ Immediate(PropertyDetails::kAttributesReadOnlyMask));
+ __ j(not_zero, &slow_case);
+ if (FLAG_debug_code) {
+ Label done;
+ // This can only be true for Constant, ConstantType and Undefined cells,
+ // because we never store the_hole via this stub.
+ __ cmp(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstant) |
+ PropertyDetails::KindField::encode(kData)));
+ __ j(equal, &done);
+ __ cmp(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ j(equal, &done);
+ __ cmp(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kUndefined) |
+ PropertyDetails::KindField::encode(kData)));
+ __ Check(equal, kUnexpectedValue);
+ __ bind(&done);
+ }
+ __ Ret();
+ __ bind(&not_same_value);
+
+ // Check if PropertyCell contains data with constant type (and is not
+ // READ_ONLY).
+ __ cmp(cell_details_reg,
+ Immediate(PropertyDetails::PropertyCellTypeField::encode(
+ PropertyCellType::kConstantType) |
+ PropertyDetails::KindField::encode(kData)));
+ __ j(not_equal, &slow_case, Label::kNear);
+
+ // Now either both old and new values must be SMIs or both must be heap
+ // objects with same map.
+ Label value_is_heap_object;
+ __ JumpIfNotSmi(value_reg, &value_is_heap_object, Label::kNear);
+ __ JumpIfNotSmi(cell_value_reg, &slow_case, Label::kNear);
+ // Old and new values are SMIs, no need for a write barrier here.
+ __ bind(&fast_smi_case);
+ __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
+ __ Ret();
+ __ bind(&value_is_heap_object);
+ __ JumpIfSmi(cell_value_reg, &slow_case, Label::kNear);
+ Register cell_value_map_reg = cell_value_reg;
+ __ mov(cell_value_map_reg,
+ FieldOperand(cell_value_reg, HeapObject::kMapOffset));
+ __ cmp(cell_value_map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
+ __ j(equal, &fast_heapobject_case);
+
+ // Fallback to the runtime.
+ __ bind(&slow_case);
+ __ SmiTag(slot_reg);
+ __ Pop(cell_reg); // Pop return address.
+ __ Push(slot_reg);
+ __ Push(value_reg);
+ __ Push(cell_reg); // Push return address.
+ __ TailCallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2, 1);
+}
+
+
// Generates an Operand for saving parameters after PrepareCallApiFunction.
static Operand ApiParameterOperand(int index) {
return Operand(esp, index * kPointerSize);
diff --git a/deps/v8/src/x87/codegen-x87.cc b/deps/v8/src/x87/codegen-x87.cc
index f9f079f6f9..ba8bef34e4 100644
--- a/deps/v8/src/x87/codegen-x87.cc
+++ b/deps/v8/src/x87/codegen-x87.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
diff --git a/deps/v8/src/x87/cpu-x87.cc b/deps/v8/src/x87/cpu-x87.cc
index 84e385dc47..22906b31be 100644
--- a/deps/v8/src/x87/cpu-x87.cc
+++ b/deps/v8/src/x87/cpu-x87.cc
@@ -8,8 +8,6 @@
#include "src/third_party/valgrind/valgrind.h"
#endif
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/assembler.h"
diff --git a/deps/v8/src/x87/debug-x87.cc b/deps/v8/src/x87/debug-x87.cc
deleted file mode 100644
index d0fcc82eaa..0000000000
--- a/deps/v8/src/x87/debug-x87.cc
+++ /dev/null
@@ -1,282 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/codegen.h"
-#include "src/debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void PatchCodeWithCall(Address pc, Address target, int guard_bytes) {
- // Call instruction takes up 5 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 5;
- int code_size = kCallCodeSize + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc, code_size);
-
-// Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->call(target, RelocInfo::NONE32);
-
- // Check that the size of the code generated is as expected.
- DCHECK_EQ(kCallCodeSize,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- DCHECK_GE(guard_bytes, 0);
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-
- CpuFeatures::FlushICache(pc, code_size);
-}
-
-
-// Patch the JS frame exit code with a debug break call. See
-// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x87.cc
-// for the precise return instructions sequence.
-void BreakLocation::SetDebugBreakAtReturn() {
- DCHECK(Assembler::kJSReturnSequenceLength >=
- Assembler::kCallInstructionLength);
- PatchCodeWithCall(
- pc(), debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
-}
-
-
-void BreakLocation::SetDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- Isolate* isolate = debug_info_->GetIsolate();
- PatchCodeWithCall(
- pc(), isolate->builtins()->Slot_DebugBreak()->entry(),
- Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs,
- bool convert_call_to_jmp) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
- __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue)));
- }
- __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- DCHECK((object_regs & ~kJSCallerSaved) == 0);
- DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
- DCHECK((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
- }
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ test(reg, Immediate(0xc0000000));
- __ Assert(zero, kUnableToEncodeValueAsSmi);
- }
- __ SmiTag(reg);
- __ push(reg);
- }
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ Move(eax, Immediate(0)); // No arguments.
- __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(masm->isolate(), 1);
- __ CallStub(&ceb);
-
- // Automatically find register that could be used after register restore.
- // We need one register for padding skip instructions.
- Register unused_reg = { -1 };
-
- // Restore the register values containing object pointers from the
- // expression stack.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Move(reg, Immediate(kDebugZapValue));
- }
- bool taken = reg.code() == esi.code();
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
- taken = true;
- }
- if ((non_object_regs & (1 << r)) != 0) {
- __ pop(reg);
- __ SmiUntag(reg);
- taken = true;
- }
- if (!taken) {
- unused_reg = reg;
- }
- }
-
- DCHECK(unused_reg.code() != -1);
-
- // Read current padding counter and skip corresponding number of words.
- __ pop(unused_reg);
- // We divide stored value by 2 (untagging) and multiply it by word's size.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
-
- // Get rid of the internal frame.
- }
-
- // If this call did not replace a call but patched other code then there will
- // be an unwanted return address left on the stack. Here we get rid of that.
- if (convert_call_to_jmp) {
- __ add(esp, Immediate(kPointerSize));
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference::debug_after_break_target_address(masm->isolate());
- __ jmp(Operand::StaticVariable(after_break_target));
-}
-
-
-void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallICStub
- // ----------- S t a t e -------------
- // -- edx : type feedback slot (smi)
- // -- edi : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, edx.bit() | edi.bit(),
- 0, false);
-}
-
-
-void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // Register state just before return from JS function (from codegen-x87.cc).
- // ----------- S t a t e -------------
- // -- eax: return value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
-}
-
-
-void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-x87.cc).
- // ----------- S t a t e -------------
- // -- edi: function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-x87.cc).
- // eax is the actual number of arguments not encoded as a smi see comment
- // above IC call.
- // ----------- S t a t e -------------
- // -- eax: number of arguments (not smi)
- // -- edi: constructor function
- // -----------------------------------
- // The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
-}
-
-
-void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
- MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-x87.cc).
- // eax is the actual number of arguments not encoded as a smi see comment
- // above IC call.
- // ----------- S t a t e -------------
- // -- eax: number of arguments (not smi)
- // -- ebx: feedback array
- // -- edx: feedback slot (smi)
- // -- edi: constructor function
- // -----------------------------------
- // The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edx.bit() | edi.bit(),
- eax.bit(), false);
-}
-
-
-void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction.
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- __ Nop(Assembler::kDebugBreakSlotLength);
- DCHECK_EQ(Assembler::kDebugBreakSlotLength,
- masm->SizeOfCodeGeneratedSince(&check_codesize));
-}
-
-
-void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
- Generate_DebugBreakCallHelper(masm, 0, 0, true);
-}
-
-
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
-
- // We do not know our frame height, but set esp based on ebp.
- __ lea(esp, Operand(ebp, -1 * kPointerSize));
-
- __ pop(edi); // Function.
- __ pop(ebp);
-
- // Load context from the function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Get function code.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
-
- // Re-run JSFunction, edi is function, esi is context.
- __ jmp(edx);
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
index 533ce1abe6..3a5d2640be 100644
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ b/deps/v8/src/x87/deoptimizer-x87.cc
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
#include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/safepoint-table.h"
+#include "src/x87/frames-x87.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x87/disasm-x87.cc b/deps/v8/src/x87/disasm-x87.cc
index 009bebbc27..bf2200ca11 100644
--- a/deps/v8/src/x87/disasm-x87.cc
+++ b/deps/v8/src/x87/disasm-x87.cc
@@ -6,8 +6,6 @@
#include <stdarg.h>
#include <stdio.h>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/disasm.h"
@@ -1284,11 +1282,7 @@ int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
} else if (*data == 0x2A) {
// movntdqa
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
+ UnimplementedInstruction();
} else {
UnimplementedInstruction();
}
@@ -1470,9 +1464,8 @@ int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
if (mod == 3) {
- AppendToBuffer("movntdq ");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ // movntdq
+ UnimplementedInstruction();
} else {
UnimplementedInstruction();
}
diff --git a/deps/v8/src/x87/frames-x87.cc b/deps/v8/src/x87/frames-x87.cc
index 557794f3a2..6b4db97880 100644
--- a/deps/v8/src/x87/frames-x87.cc
+++ b/deps/v8/src/x87/frames-x87.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/assembler.h"
diff --git a/deps/v8/src/x87/frames-x87.h b/deps/v8/src/x87/frames-x87.h
index 78209258d6..c9e75e83ea 100644
--- a/deps/v8/src/x87/frames-x87.h
+++ b/deps/v8/src/x87/frames-x87.h
@@ -79,12 +79,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
} } // namespace v8::internal
#endif // V8_X87_FRAMES_X87_H_
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index 05fa9b8926..204e0bf619 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/interface-descriptors.h"
@@ -32,12 +30,16 @@ const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return edi; }
const Register VectorStoreICDescriptor::VectorRegister() { return ebx; }
-const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
+const Register StoreTransitionDescriptor::MapRegister() {
+ return FLAG_vector_stores ? no_reg : ebx;
+}
-const Register ElementTransitionAndStoreDescriptor::MapRegister() {
- return ebx;
-}
+const Register LoadGlobalViaContextDescriptor::SlotRegister() { return ebx; }
+
+
+const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
+const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
const Register InstanceofDescriptor::left() { return eax; }
@@ -63,6 +65,20 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
+void StoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister()};
+
+ // When FLAG_vector_stores is true, we want to pass the map register on the
+ // stack instead of in a register.
+ DCHECK(FLAG_vector_stores || !MapRegister().is(no_reg));
+
+ int register_count = FLAG_vector_stores ? 3 : 4;
+ data->InitializePlatformSpecific(register_count, registers);
+}
+
+
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx};
@@ -85,6 +101,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
}
+// static
+const Register ToObjectDescriptor::ReceiverRegister() { return eax; }
+
+
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax};
@@ -159,12 +179,12 @@ void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
// ebx : feedback vector
- // edx : (only if ebx is not the megamorphic symbol) slot in feedback
- // vector (Smi)
+ // ecx : original constructor (for IsSuperConstructorCall)
+ // edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {eax, edi, ebx};
+ Register registers[] = {eax, edi, ecx, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
@@ -332,11 +352,22 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
+void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ edi, // math rounding function
+ edx, // vector slot id
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void MathRoundVariantCallFromOptimizedCodeDescriptor::
+ InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
edi, // math rounding function
edx, // vector slot id
+ ebx // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/x87/lithium-codegen-x87.cc b/deps/v8/src/x87/lithium-codegen-x87.cc
index 6021ec74e9..0c852640e8 100644
--- a/deps/v8/src/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/x87/lithium-codegen-x87.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/base/bits.h"
@@ -15,6 +13,7 @@
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/x87/frames-x87.h"
#include "src/x87/lithium-codegen-x87.h"
namespace v8 {
@@ -102,7 +101,7 @@ bool LCodeGen::GeneratePrologue() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
@@ -807,6 +806,10 @@ int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
const Representation& r) const {
HConstant* constant = chunk_->LookupConstant(op);
+ if (r.IsExternal()) {
+ return reinterpret_cast<int32_t>(
+ constant->ExternalReferenceValue().address());
+ }
int32_t value = constant->Integer32Value();
if (r.IsInteger32()) return value;
DCHECK(r.IsSmiOrTagged());
@@ -940,15 +943,23 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
if (op->IsStackSlot()) {
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
if (is_tagged) {
- translation->StoreStackSlot(op->index());
+ translation->StoreStackSlot(index);
} else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
+ translation->StoreUint32StackSlot(index);
} else {
- translation->StoreInt32StackSlot(op->index());
+ translation->StoreInt32StackSlot(index);
}
} else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
+ int index = op->index();
+ if (index >= 0) {
+ index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+ }
+ translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -2434,6 +2445,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(equal, instr->TrueLabel(chunk_));
}
+ if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+ // SIMD value -> true.
+ __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
+ __ j(equal, instr->TrueLabel(chunk_));
+ }
+
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
@@ -3129,13 +3146,31 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
- PREMONOMORPHIC).code();
+ Handle<Code> ic =
+ CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+ SLOPPY, PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
+void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(esi));
+ DCHECK(ToRegister(instr->result()).is(eax));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
+ __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
+ Handle<Code> stub =
+ CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
+ }
+}
+
+
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3242,7 +3277,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+ isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3314,38 +3349,29 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
instr->hydrogen()->key()->representation(),
elements_kind,
instr->base_offset()));
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
- } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ } else if (elements_kind == FLOAT64_ELEMENTS) {
X87Mov(ToX87Register(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
- case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
__ movsx_b(result, operand);
break;
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ movzx_b(result, operand);
break;
- case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
__ movsx_w(result, operand);
break;
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
__ movzx_w(result, operand);
break;
- case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
__ mov(result, operand);
break;
- case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
@@ -3353,8 +3379,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
}
break;
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -3433,7 +3457,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3664,10 +3688,9 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- __ push(esi); // The context is the first argument.
__ push(Immediate(instr->hydrogen()->pairs()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}
@@ -4472,7 +4495,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (operand_value->IsRegister()) {
Register value = ToRegister(operand_value);
__ Store(value, operand, representation);
- } else if (representation.IsInteger32()) {
+ } else if (representation.IsInteger32() || representation.IsExternal()) {
Immediate immediate = ToImmediate(operand_value, representation);
DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
__ mov(operand, immediate);
@@ -4515,6 +4538,30 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
+ DCHECK(ToRegister(instr->context()).is(esi));
+ DCHECK(ToRegister(instr->value())
+ .is(StoreGlobalViaContextDescriptor::ValueRegister()));
+
+ int const slot = instr->slot_index();
+ int const depth = instr->depth();
+ if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
+ __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
+ Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
+ isolate(), depth, instr->language_mode())
+ .code();
+ CallCode(stub, RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ Push(Smi::FromInt(slot));
+ __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
+ __ CallRuntime(is_strict(instr->language_mode())
+ ? Runtime::kStoreGlobalViaContext_Strict
+ : Runtime::kStoreGlobalViaContext_Sloppy,
+ 2);
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
if (instr->index()->IsConstantOperand()) {
@@ -4554,11 +4601,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
instr->hydrogen()->key()->representation(),
elements_kind,
instr->base_offset()));
- if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
- elements_kind == FLOAT32_ELEMENTS) {
+ if (elements_kind == FLOAT32_ELEMENTS) {
X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand);
- } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ } else if (elements_kind == FLOAT64_ELEMENTS) {
uint64_t int_val = kHoleNanInt64;
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
@@ -4588,28 +4633,19 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
- case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
- case EXTERNAL_UINT8_ELEMENTS:
- case EXTERNAL_INT8_ELEMENTS:
case UINT8_ELEMENTS:
case INT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ mov_b(operand, value);
break;
- case EXTERNAL_INT16_ELEMENTS:
- case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
case INT16_ELEMENTS:
__ mov_w(operand, value);
break;
- case EXTERNAL_INT32_ELEMENTS:
- case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
case INT32_ELEMENTS:
__ mov(operand, value);
break;
- case EXTERNAL_FLOAT32_ELEMENTS:
- case EXTERNAL_FLOAT64_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -4725,7 +4761,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases...external, fast-double, fast
- if (instr->is_typed_elements()) {
+ if (instr->is_fixed_typed_array()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -6074,10 +6110,7 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
} else if (String::Equals(type_name, factory()->string_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label, false_distance);
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- final_branch_condition = zero;
+ final_branch_condition = below;
} else if (String::Equals(type_name, factory()->symbol_string())) {
__ JumpIfSmi(input, false_label, false_distance);
@@ -6121,6 +6154,17 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
1 << Map::kIsUndetectable);
final_branch_condition = zero;
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(type_name, factory()->type##_string())) { \
+ __ JumpIfSmi(input, false_label, false_distance); \
+ __ cmp(FieldOperand(input, HeapObject::kMapOffset), \
+ factory()->type##_map()); \
+ final_branch_condition = equal;
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+
} else {
__ jmp(false_label, false_distance);
}
diff --git a/deps/v8/src/x87/lithium-gap-resolver-x87.cc b/deps/v8/src/x87/lithium-gap-resolver-x87.cc
index b2f9b263c6..edafcb2b16 100644
--- a/deps/v8/src/x87/lithium-gap-resolver-x87.cc
+++ b/deps/v8/src/x87/lithium-gap-resolver-x87.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/x87/lithium-codegen-x87.h"
diff --git a/deps/v8/src/x87/lithium-gap-resolver-x87.h b/deps/v8/src/x87/lithium-gap-resolver-x87.h
index 5dfef8ad01..cdd26b8776 100644
--- a/deps/v8/src/x87/lithium-gap-resolver-x87.h
+++ b/deps/v8/src/x87/lithium-gap-resolver-x87.h
@@ -5,8 +5,6 @@
#ifndef V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
#define V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
-#include "src/v8.h"
-
#include "src/lithium.h"
namespace v8 {
diff --git a/deps/v8/src/x87/lithium-x87.cc b/deps/v8/src/x87/lithium-x87.cc
index a57aa91576..d382e4f6d5 100644
--- a/deps/v8/src/x87/lithium-x87.cc
+++ b/deps/v8/src/x87/lithium-x87.cc
@@ -4,8 +4,6 @@
#include <sstream>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/hydrogen-osr.h"
@@ -390,6 +388,11 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
}
+void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d", depth(), slot_index());
+}
+
+
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -408,6 +411,12 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
+ stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
+ value()->PrintTo(stream);
+}
+
+
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -1625,8 +1634,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else if (instr->representation().IsExternal()) {
- DCHECK(instr->left()->representation().IsExternal());
- DCHECK(instr->right()->representation().IsInteger32());
+ DCHECK(instr->IsConsistentExternalRepresentation());
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
bool use_lea = LAddI::UseLea(instr);
LOperand* left = UseRegisterAtStart(instr->left());
@@ -2142,6 +2150,15 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
+ HLoadGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ DCHECK(instr->slot_index() > 0);
+ LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2218,7 +2235,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
: UseRegisterOrConstantAtStart(instr->key());
LInstruction* result = NULL;
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
@@ -2232,10 +2249,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
bool needs_environment;
- if (instr->is_external() || instr->is_fixed_typed_array()) {
+ if (instr->is_fixed_typed_array()) {
// see LCodeGen::DoLoadKeyedExternalArray
- needs_environment = (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
- elements_kind == UINT32_ELEMENTS) &&
+ needs_environment = elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32);
} else {
// see LCodeGen::DoLoadKeyedFixedDoubleArray and
@@ -2272,9 +2288,6 @@ LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
// Determine if we need a byte register in this case for the value.
bool val_is_fixed_register =
- elements_kind == EXTERNAL_INT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
elements_kind == UINT8_ELEMENTS ||
elements_kind == INT8_ELEMENTS ||
elements_kind == UINT8_CLAMPED_ELEMENTS;
@@ -2291,7 +2304,7 @@ LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_typed_elements()) {
+ if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
DCHECK(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsSmi());
@@ -2328,10 +2341,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
!IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(elements_kind)));
- DCHECK((instr->is_fixed_typed_array() &&
- instr->elements()->representation().IsTagged()) ||
- (instr->is_external() &&
- instr->elements()->representation().IsExternal()));
+ DCHECK(instr->elements()->representation().IsExternal());
LOperand* backing_store = UseRegister(instr->elements());
LOperand* val = GetStoreKeyedValueOperand(instr);
@@ -2487,6 +2497,19 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
+ HStoreGlobalViaContext* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* value = UseFixed(instr->value(),
+ StoreGlobalViaContextDescriptor::ValueRegister());
+ DCHECK(instr->slot_index() > 0);
+
+ LStoreGlobalViaContext* result =
+ new (zone()) LStoreGlobalViaContext(context, value);
+ return MarkAsCall(result, instr);
+}
+
+
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
diff --git a/deps/v8/src/x87/lithium-x87.h b/deps/v8/src/x87/lithium-x87.h
index 1eedba1f48..3e6f67af16 100644
--- a/deps/v8/src/x87/lithium-x87.h
+++ b/deps/v8/src/x87/lithium-x87.h
@@ -106,6 +106,7 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
+ V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -146,6 +147,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
+ V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1647,15 +1649,9 @@ class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- bool is_external() const {
- return hydrogen()->is_external();
- }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1675,12 +1671,8 @@ inline static bool ExternalArrayOpRequiresTemp(
// an index cannot fold the scale operation into a load and need an extra
// temp register to do the work.
return key_representation.IsSmi() &&
- (elements_kind == EXTERNAL_INT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
- elements_kind == UINT8_ELEMENTS ||
- elements_kind == INT8_ELEMENTS ||
- elements_kind == UINT8_CLAMPED_ELEMENTS);
+ (elements_kind == UINT8_ELEMENTS || elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS);
}
@@ -1721,7 +1713,23 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
+ TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ LOperand* context() { return inputs_[0]; }
+
+ int depth() const { return hydrogen()->depth(); }
+ int slot_index() const { return hydrogen()->slot_index(); }
};
@@ -2213,6 +2221,28 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
+class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreGlobalViaContext(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
+ "store-global-via-context")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
+
+ void PrintDataTo(StringStream* stream) override;
+
+ int depth() { return hydrogen()->depth(); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
@@ -2221,13 +2251,9 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
inputs_[2] = val;
}
- bool is_external() const { return hydrogen()->is_external(); }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
- bool is_typed_elements() const {
- return is_external() || is_fixed_typed_array();
- }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index 46c1830c05..1fab3aa7a3 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_X87
#include "src/base/bits.h"
@@ -11,8 +9,9 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/runtime/runtime.h"
+#include "src/x87/frames-x87.h"
namespace v8 {
namespace internal {
@@ -493,9 +492,10 @@ void MacroAssembler::RecordWrite(
void MacroAssembler::DebugBreak() {
Move(eax, Immediate(0));
- mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
+ mov(ebx, Immediate(ExternalReference(Runtime::kHandleDebuggerStatement,
+ isolate())));
CEntryStub ces(isolate(), 1);
- call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
@@ -1460,20 +1460,6 @@ void MacroAssembler::Allocate(Register object_size,
}
-void MacroAssembler::UndoAllocationInNewSpace(Register object) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
-#ifdef DEBUG
- cmp(object, Operand::StaticVariable(new_space_allocation_top));
- Check(below, kUndoAllocationOfNonAllocatedMemory);
-#endif
- mov(Operand::StaticVariable(new_space_allocation_top), object);
-}
-
-
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
@@ -3069,14 +3055,22 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
DCHECK(!scratch1.is(scratch0));
Factory* factory = isolate()->factory();
Register current = scratch0;
- Label loop_again;
+ Label loop_again, end;
// scratch contained elements pointer.
mov(current, object);
+ mov(current, FieldOperand(current, HeapObject::kMapOffset));
+ mov(current, FieldOperand(current, Map::kPrototypeOffset));
+ cmp(current, Immediate(factory->null_value()));
+ j(equal, &end);
// Loop based on the map going up the prototype chain.
bind(&loop_again);
mov(current, FieldOperand(current, HeapObject::kMapOffset));
+ STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ CmpInstanceType(current, JS_OBJECT_TYPE);
+ j(below, found);
mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
@@ -3084,6 +3078,8 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
mov(current, FieldOperand(current, Map::kPrototypeOffset));
cmp(current, Immediate(factory->null_value()));
j(not_equal, &loop_again);
+
+ bind(&end);
}
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index cbaabc89d0..9a2c903ab0 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -13,6 +13,19 @@
namespace v8 {
namespace internal {
+// Give alias names to registers for calling conventions.
+const Register kReturnRegister0 = {kRegister_eax_Code};
+const Register kReturnRegister1 = {kRegister_edx_Code};
+const Register kJSFunctionRegister = {kRegister_edi_Code};
+const Register kContextRegister = {kRegister_esi_Code};
+const Register kInterpreterAccumulatorRegister = {kRegister_eax_Code};
+const Register kInterpreterRegisterFileRegister = {kRegister_edx_Code};
+const Register kInterpreterBytecodeOffsetRegister = {kRegister_ecx_Code};
+const Register kInterpreterBytecodeArrayRegister = {kRegister_edi_Code};
+const Register kInterpreterDispatchTableRegister = {kRegister_ebx_Code};
+const Register kRuntimeCallFunctionRegister = {kRegister_ebx_Code};
+const Register kRuntimeCallArgCountRegister = {kRegister_eax_Code};
+
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
@@ -600,12 +613,6 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. Make sure that no pointers are left to the
- // object(s) no longer allocated as they would be invalid when allocation is
- // un-done.
- void UndoAllocationInNewSpace(Register object);
-
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or
@@ -985,7 +992,7 @@ class MacroAssembler: public Assembler {
class CodePatcher {
public:
CodePatcher(byte* address, int size);
- virtual ~CodePatcher();
+ ~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
@@ -1028,6 +1035,11 @@ inline Operand ContextOperand(Register context, int index) {
}
+inline Operand ContextOperand(Register context, Register index) {
+ return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
+}
+
+
inline Operand GlobalObjectOperand() {
return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
}
diff --git a/deps/v8/test/benchmarks/testcfg.py b/deps/v8/test/benchmarks/testcfg.py
index 2a65037754..0c3698bd60 100644
--- a/deps/v8/test/benchmarks/testcfg.py
+++ b/deps/v8/test/benchmarks/testcfg.py
@@ -31,10 +31,25 @@ import shutil
import subprocess
import tarfile
+from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.objects import testcase
+class BenchmarksVariantGenerator(testsuite.VariantGenerator):
+ # Both --nocrankshaft and --stressopt are very slow. Add TF but without
+ # always opt to match the way the benchmarks are run for performance
+ # testing.
+ def FilterVariantsByTest(self, testcase):
+ if testcase.outcomes and statusfile.OnlyStandardVariant(
+ testcase.outcomes):
+ return self.standard_variant
+ return self.fast_variants
+
+ def GetFlagSets(self, testcase, variant):
+ return testsuite.FAST_VARIANT_FLAGS[variant]
+
+
class BenchmarksTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
@@ -182,11 +197,8 @@ class BenchmarksTestSuite(testsuite.TestSuite):
os.chdir(old_cwd)
- def VariantFlags(self, testcase, default_flags):
- # Both --nocrankshaft and --stressopt are very slow. Add TF but without
- # always opt to match the way the benchmarks are run for performance
- # testing.
- return [[], ["--turbo"]]
+ def _VariantGeneratorFactory(self):
+ return BenchmarksVariantGenerator
def GetSuite(name, root):
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index 93565c5a7a..ea8a397300 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -3,3 +3,10 @@ per-file *-mips*=gergely.kis@imgtec.com
per-file *-mips*=akos.palfi@imgtec.com
per-file *-mips*=balazs.kilvady@imgtec.com
per-file *-mips*=dusan.milosavljevic@imgtec.com
+per-file *-ppc*=dstence@us.ibm.com
+per-file *-ppc*=joransiu@ca.ibm.com
+per-file *-ppc*=jyan@ca.ibm.com
+per-file *-ppc*=mbrandy@us.ibm.com
+per-file *-ppc*=michael_dawson@ca.ibm.com
+per-file *-x87*=chunyang.dai@intel.com
+per-file *-x87*=weiliang.lin@intel.com
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 851096ddce..72be29c383 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -29,7 +29,7 @@
#include "test/cctest/cctest.h"
#include "include/libplatform/libplatform.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "test/cctest/print-extension.h"
#include "test/cctest/profiler-extension.h"
#include "test/cctest/trace-extension.h"
@@ -93,6 +93,12 @@ void CcTest::Run() {
}
callback_();
if (initialize_) {
+ if (v8::Locker::IsActive()) {
+ v8::Locker locker(isolate_);
+ EmptyMessageQueues(isolate_);
+ } else {
+ EmptyMessageQueues(isolate_);
+ }
isolate_->Exit();
}
}
@@ -132,7 +138,10 @@ static void PrintTestList(CcTest* current) {
class CcTestArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
- virtual void* Allocate(size_t length) { return malloc(length); }
+ virtual void* Allocate(size_t length) {
+ void* data = AllocateUninitialized(length);
+ return data == NULL ? data : memset(data, 0, length);
+ }
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t length) { free(data); }
// TODO(dslomov): Remove when v8:2823 is fixed.
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 8f0c58d38e..10207c1038 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -49,8 +49,6 @@
'compiler/codegen-tester.h',
'compiler/function-tester.h',
'compiler/graph-builder-tester.h',
- 'compiler/simplified-graph-builder.cc',
- 'compiler/simplified-graph-builder.h',
'compiler/test-basic-block-profiler.cc',
'compiler/test-branch-combine.cc',
'compiler/test-changes-lowering.cc',
@@ -78,12 +76,15 @@
'compiler/test-run-jsexceptions.cc',
'compiler/test-run-jsops.cc',
'compiler/test-run-machops.cc',
+ 'compiler/test-run-native-calls.cc',
'compiler/test-run-properties.cc',
'compiler/test-run-stackcheck.cc',
'compiler/test-run-stubs.cc',
'compiler/test-run-variables.cc',
'compiler/test-simplified-lowering.cc',
'cctest.cc',
+ 'interpreter/test-bytecode-generator.cc',
+ 'interpreter/test-interpreter.cc',
'gay-fixed.cc',
'gay-precision.cc',
'gay-shortest.cc',
@@ -145,6 +146,7 @@
'test-representation.cc',
'test-sampler-api.cc',
'test-serialize.cc',
+ 'test-simd.cc',
'test-spaces.cc',
'test-strings.cc',
'test-symbols.cc',
@@ -274,6 +276,11 @@
}, {
'dependencies': ['../../tools/gyp/v8.gyp:v8'],
}],
+ ['v8_wasm!=0', {
+ 'dependencies': [
+ '../../third_party/wasm/test/cctest/wasm/wasm.gyp:wasm_cctest'
+ ],
+ }],
],
},
{
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index cc9edc801f..5c19195208 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -28,6 +28,7 @@
#ifndef CCTEST_H_
#define CCTEST_H_
+#include "include/libplatform/libplatform.h"
#include "src/v8.h"
#ifndef TEST
@@ -107,6 +108,7 @@ class CcTest {
typedef void (TestFunction)();
CcTest(TestFunction* callback, const char* file, const char* name,
const char* dependency, bool enabled, bool initialize);
+ ~CcTest() { i::DeleteArray(file_); }
void Run();
static CcTest* last() { return last_; }
CcTest* prev() { return prev_; }
@@ -405,14 +407,6 @@ static inline v8::MaybeLocal<v8::Value> CompileRun(
}
-// Compiles source as an ES6 module.
-static inline v8::Local<v8::Value> CompileRunModule(const char* source) {
- v8::ScriptCompiler::Source script_source(v8_str(source));
- return v8::ScriptCompiler::CompileModule(v8::Isolate::GetCurrent(),
- &script_source)->Run();
-}
-
-
static inline v8::Local<v8::Value> CompileRun(v8::Local<v8::String> source) {
return v8::Script::Compile(source)->Run();
}
@@ -594,26 +588,11 @@ static inline void EnableDebugger() {
static inline void DisableDebugger() { v8::Debug::SetDebugEventListener(NULL); }
-// Helper class for new allocations tracking and checking.
-// To use checking of JS allocations tracking in a test,
-// just create an instance of this class.
-class HeapObjectsTracker {
- public:
- HeapObjectsTracker() {
- heap_profiler_ = i::Isolate::Current()->heap_profiler();
- CHECK_NOT_NULL(heap_profiler_);
- heap_profiler_->StartHeapObjectsTracking(true);
- }
-
- ~HeapObjectsTracker() {
- i::Isolate::Current()->heap()->CollectAllAvailableGarbage();
- CHECK_EQ(0, heap_profiler_->heap_object_map()->FindUntrackedObjects());
- heap_profiler_->StopHeapObjectsTracking();
- }
-
- private:
- i::HeapProfiler* heap_profiler_;
-};
+static inline void EmptyMessageQueues(v8::Isolate* isolate) {
+ while (v8::platform::PumpMessageLoop(v8::internal::V8::GetCurrentPlatform(),
+ isolate))
+ ;
+}
class InitializedHandleScope {
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 68c570edcc..013d0bf0da 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -96,6 +96,10 @@
# BUG(2340). Preprocessing stack traces is disabled at the moment.
'test-heap/PreprocessStackTrace': [FAIL],
+ # BUG(4333). Function name inferrer does not work for ES6 clases.
+ 'test-func-name-inference/UpperCaseClass': [FAIL],
+ 'test-func-name-inference/LowerCaseClass': [FAIL],
+
##############################################################################
# TurboFan compiler failures.
@@ -104,7 +108,6 @@
'test-heap-profiler/ManyLocalsInSharedContext': [PASS, NO_VARIANTS],
'test-serialize/SerializeToplevelLargeCodeObject': [PASS, NO_VARIANTS],
'test-debug/ThreadedDebugging': [PASS, NO_VARIANTS],
- 'test-debug/DebugBreakLoop': [PASS, NO_VARIANTS],
# BUG(3742).
'test-mark-compact/MarkCompactCollector': [PASS, ['arch==arm', NO_VARIANTS]],
@@ -127,6 +130,9 @@
# TODO(machenbach, mvstanton): Flaky in debug on all platforms.
'test-lockers/LockerUnlocker': [PASS, ['mode == debug', FLAKY]],
+
+ # BUG(4141).
+ 'test-alloc/CodeRange': [PASS, FLAKY],
}], # ALWAYS
##############################################################################
@@ -173,7 +179,6 @@
'test-api/ExternalFloatArray': [SKIP],
'test-api/Float32Array': [SKIP],
'test-api/Float64Array': [SKIP],
- 'test-debug/DebugBreakLoop': [SKIP],
}], # 'arch == arm64 and mode == debug and simulator_run == True'
##############################################################################
diff --git a/deps/v8/test/cctest/compiler/c-signature.h b/deps/v8/test/cctest/compiler/c-signature.h
index 83b3328a3b..8eaf6325f2 100644
--- a/deps/v8/test/cctest/compiler/c-signature.h
+++ b/deps/v8/test/cctest/compiler/c-signature.h
@@ -69,6 +69,10 @@ class CSignature : public MachineSignature {
}
}
+ static CSignature* FromMachine(Zone* zone, MachineSignature* msig) {
+ return reinterpret_cast<CSignature*>(msig);
+ }
+
static CSignature* New(Zone* zone, MachineType ret,
MachineType p1 = kMachNone, MachineType p2 = kMachNone,
MachineType p3 = kMachNone, MachineType p4 = kMachNone,
diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h
index dc265ea5fa..31a6d0f93b 100644
--- a/deps/v8/test/cctest/compiler/call-tester.h
+++ b/deps/v8/test/cctest/compiler/call-tester.h
@@ -304,6 +304,21 @@ class CallHelper {
Isolate* isolate_;
};
+// A call helper that calls the given code object assuming C calling convention.
+template <typename T>
+class CodeRunner : public CallHelper<T> {
+ public:
+ CodeRunner(Isolate* isolate, Handle<Code> code, CSignature* csig)
+ : CallHelper<T>(isolate, csig), code_(code) {}
+ virtual ~CodeRunner() {}
+
+ virtual byte* Generate() { return code_->entry(); }
+
+ private:
+ Handle<Code> code_;
+};
+
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.cc b/deps/v8/test/cctest/compiler/codegen-tester.cc
index d05b282293..98957c7f01 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.cc
+++ b/deps/v8/test/cctest/compiler/codegen-tester.cc
@@ -368,8 +368,6 @@ void Int32BinopInputShapeTester::RunRight(
}
-#if V8_TURBOFAN_TARGET
-
TEST(ParametersEqual) {
RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
Node* p1 = m.Parameter(1);
@@ -572,5 +570,3 @@ TEST(RunBinopTester) {
FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, bt.call(-11.25, *i)); }
}
}
-
-#endif // V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index bc6d938ce1..d8ecc02fc2 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -34,8 +34,10 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
p2, p3, p4)),
RawMachineAssembler(
main_isolate(), new (main_zone()) Graph(main_zone()),
- CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p0, p1,
- p2, p3, p4),
+ Linkage::GetSimplifiedCDescriptor(
+ main_zone(),
+ CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p0,
+ p1, p2, p3, p4)),
kMachPtr, InstructionSelector::SupportedMachineOperatorFlags()) {}
void CheckNumber(double expected, Object* number) {
diff --git a/deps/v8/test/cctest/compiler/function-tester.h b/deps/v8/test/cctest/compiler/function-tester.h
index 54c62ab634..56ab514c65 100644
--- a/deps/v8/test/cctest/compiler/function-tester.h
+++ b/deps/v8/test/cctest/compiler/function-tester.h
@@ -13,15 +13,13 @@
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
#include "src/execution.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/handles.h"
#include "src/objects-inl.h"
#include "src/parser.h"
#include "src/rewriter.h"
#include "src/scopes.h"
-#define USE_CRANKSHAFT 0
-
namespace v8 {
namespace internal {
namespace compiler {
@@ -156,7 +154,6 @@ class FunctionTester : public InitializedHandleScope {
Handle<JSFunction> Compile(Handle<JSFunction> function) {
// TODO(titzer): make this method private.
-#if V8_TURBOFAN_TARGET
Zone zone;
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);
@@ -181,19 +178,6 @@ class FunctionTester : public InitializedHandleScope {
CHECK(!code.is_null());
info.context()->native_context()->AddOptimizedCode(*code);
function->ReplaceCode(*code);
-#elif USE_CRANKSHAFT
- Handle<Code> unoptimized = Handle<Code>(function->code());
- Handle<Code> code = Compiler::GetOptimizedCode(function, unoptimized,
- Compiler::NOT_CONCURRENT);
- CHECK(!code.is_null());
-#if ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- code->Disassemble("test code", tracing_scope.file());
- }
-#endif
- function->ReplaceCode(*code);
-#endif
return function;
}
@@ -212,7 +196,6 @@ class FunctionTester : public InitializedHandleScope {
// Compile the given machine graph instead of the source of the function
// and replace the JSFunction's code with the result.
Handle<JSFunction> CompileGraph(Graph* graph) {
- CHECK(Pipeline::SupportedTarget());
Zone zone;
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index 7270293e0f..41c1e384be 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -9,13 +9,12 @@
#include "test/cctest/cctest.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-builder.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/operator-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/simplified-operator.h"
#include "test/cctest/compiler/call-tester.h"
-#include "test/cctest/compiler/simplified-graph-builder.h"
namespace v8 {
namespace internal {
@@ -29,6 +28,12 @@ class GraphAndBuilders {
main_machine_(zone),
main_simplified_(zone) {}
+ Graph* graph() const { return main_graph_; }
+ Zone* zone() const { return graph()->zone(); }
+ CommonOperatorBuilder* common() { return &main_common_; }
+ MachineOperatorBuilder* machine() { return &main_machine_; }
+ SimplifiedOperatorBuilder* simplified() { return &main_simplified_; }
+
protected:
// Prefixed with main_ to avoid naming conflicts.
Graph* main_graph_;
@@ -40,9 +45,8 @@ class GraphAndBuilders {
template <typename ReturnType>
class GraphBuilderTester : public HandleAndZoneScope,
- private GraphAndBuilders,
- public CallHelper<ReturnType>,
- public SimplifiedGraphBuilder {
+ public GraphAndBuilders,
+ public CallHelper<ReturnType> {
public:
explicit GraphBuilderTester(MachineType p0 = kMachNone,
MachineType p1 = kMachNone,
@@ -54,8 +58,8 @@ class GraphBuilderTester : public HandleAndZoneScope,
main_isolate(),
CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p0, p1,
p2, p3, p4)),
- SimplifiedGraphBuilder(main_isolate(), main_graph_, &main_common_,
- &main_machine_, &main_simplified_),
+ effect_(NULL),
+ return_(NULL),
parameters_(main_zone()->template NewArray<Node*>(parameter_count())) {
Begin(static_cast<int>(parameter_count()));
InitParameters();
@@ -68,16 +72,214 @@ class GraphBuilderTester : public HandleAndZoneScope,
return parameters_[index];
}
- Factory* factory() const { return isolate()->factory(); }
+ Isolate* isolate() { return main_isolate(); }
+ Factory* factory() { return isolate()->factory(); }
+
+ // Initialize graph and builder.
+ void Begin(int num_parameters) {
+ DCHECK(graph()->start() == NULL);
+ Node* start = graph()->NewNode(common()->Start(num_parameters + 3));
+ graph()->SetStart(start);
+ effect_ = start;
+ }
+
+ void Return(Node* value) {
+ return_ =
+ graph()->NewNode(common()->Return(), value, effect_, graph()->start());
+ effect_ = NULL;
+ }
+
+ // Close the graph.
+ void End() {
+ Node* end = graph()->NewNode(common()->End(1), return_);
+ graph()->SetEnd(end);
+ }
+
+ Node* PointerConstant(void* value) {
+ intptr_t intptr_value = reinterpret_cast<intptr_t>(value);
+ return kPointerSize == 8 ? NewNode(common()->Int64Constant(intptr_value))
+ : Int32Constant(static_cast<int>(intptr_value));
+ }
+ Node* Int32Constant(int32_t value) {
+ return NewNode(common()->Int32Constant(value));
+ }
+ Node* HeapConstant(Handle<HeapObject> object) {
+ Unique<HeapObject> val = Unique<HeapObject>::CreateUninitialized(object);
+ return NewNode(common()->HeapConstant(val));
+ }
+
+ Node* BooleanNot(Node* a) { return NewNode(simplified()->BooleanNot(), a); }
+
+ Node* NumberEqual(Node* a, Node* b) {
+ return NewNode(simplified()->NumberEqual(), a, b);
+ }
+ Node* NumberLessThan(Node* a, Node* b) {
+ return NewNode(simplified()->NumberLessThan(), a, b);
+ }
+ Node* NumberLessThanOrEqual(Node* a, Node* b) {
+ return NewNode(simplified()->NumberLessThanOrEqual(), a, b);
+ }
+ Node* NumberAdd(Node* a, Node* b) {
+ return NewNode(simplified()->NumberAdd(), a, b);
+ }
+ Node* NumberSubtract(Node* a, Node* b) {
+ return NewNode(simplified()->NumberSubtract(), a, b);
+ }
+ Node* NumberMultiply(Node* a, Node* b) {
+ return NewNode(simplified()->NumberMultiply(), a, b);
+ }
+ Node* NumberDivide(Node* a, Node* b) {
+ return NewNode(simplified()->NumberDivide(), a, b);
+ }
+ Node* NumberModulus(Node* a, Node* b) {
+ return NewNode(simplified()->NumberModulus(), a, b);
+ }
+ Node* NumberToInt32(Node* a) {
+ return NewNode(simplified()->NumberToInt32(), a);
+ }
+ Node* NumberToUint32(Node* a) {
+ return NewNode(simplified()->NumberToUint32(), a);
+ }
+
+ Node* StringEqual(Node* a, Node* b) {
+ return NewNode(simplified()->StringEqual(), a, b);
+ }
+ Node* StringLessThan(Node* a, Node* b) {
+ return NewNode(simplified()->StringLessThan(), a, b);
+ }
+ Node* StringLessThanOrEqual(Node* a, Node* b) {
+ return NewNode(simplified()->StringLessThanOrEqual(), a, b);
+ }
+
+ Node* ChangeTaggedToInt32(Node* a) {
+ return NewNode(simplified()->ChangeTaggedToInt32(), a);
+ }
+ Node* ChangeTaggedToUint32(Node* a) {
+ return NewNode(simplified()->ChangeTaggedToUint32(), a);
+ }
+ Node* ChangeTaggedToFloat64(Node* a) {
+ return NewNode(simplified()->ChangeTaggedToFloat64(), a);
+ }
+ Node* ChangeInt32ToTagged(Node* a) {
+ return NewNode(simplified()->ChangeInt32ToTagged(), a);
+ }
+ Node* ChangeUint32ToTagged(Node* a) {
+ return NewNode(simplified()->ChangeUint32ToTagged(), a);
+ }
+ Node* ChangeFloat64ToTagged(Node* a) {
+ return NewNode(simplified()->ChangeFloat64ToTagged(), a);
+ }
+ Node* ChangeBoolToBit(Node* a) {
+ return NewNode(simplified()->ChangeBoolToBit(), a);
+ }
+ Node* ChangeBitToBool(Node* a) {
+ return NewNode(simplified()->ChangeBitToBool(), a);
+ }
+
+ Node* LoadField(const FieldAccess& access, Node* object) {
+ return NewNode(simplified()->LoadField(access), object);
+ }
+ Node* StoreField(const FieldAccess& access, Node* object, Node* value) {
+ return NewNode(simplified()->StoreField(access), object, value);
+ }
+ Node* LoadElement(const ElementAccess& access, Node* object, Node* index) {
+ return NewNode(simplified()->LoadElement(access), object, index);
+ }
+ Node* StoreElement(const ElementAccess& access, Node* object, Node* index,
+ Node* value) {
+ return NewNode(simplified()->StoreElement(access), object, index, value);
+ }
+
+ Node* NewNode(const Operator* op) {
+ return MakeNode(op, 0, static_cast<Node**>(NULL));
+ }
+
+ Node* NewNode(const Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2) {
+ Node* buffer[] = {n1, n2};
+ return MakeNode(op, arraysize(buffer), buffer);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* buffer[] = {n1, n2, n3};
+ return MakeNode(op, arraysize(buffer), buffer);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* buffer[] = {n1, n2, n3, n4};
+ return MakeNode(op, arraysize(buffer), buffer);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5) {
+ Node* buffer[] = {n1, n2, n3, n4, n5};
+ return MakeNode(op, arraysize(buffer), buffer);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+ return MakeNode(op, arraysize(nodes), nodes);
+ }
+
+ Node* NewNode(const Operator* op, int value_input_count,
+ Node** value_inputs) {
+ return MakeNode(op, value_input_count, value_inputs);
+ }
protected:
+ Node* MakeNode(const Operator* op, int value_input_count,
+ Node** value_inputs) {
+ DCHECK(op->ValueInputCount() == value_input_count);
+
+ DCHECK(!OperatorProperties::HasContextInput(op));
+ DCHECK_EQ(0, OperatorProperties::GetFrameStateInputCount(op));
+ bool has_control = op->ControlInputCount() == 1;
+ bool has_effect = op->EffectInputCount() == 1;
+
+ DCHECK(op->ControlInputCount() < 2);
+ DCHECK(op->EffectInputCount() < 2);
+
+ Node* result = NULL;
+ if (!has_control && !has_effect) {
+ result = graph()->NewNode(op, value_input_count, value_inputs);
+ } else {
+ int input_count_with_deps = value_input_count;
+ if (has_control) ++input_count_with_deps;
+ if (has_effect) ++input_count_with_deps;
+ Node** buffer = zone()->template NewArray<Node*>(input_count_with_deps);
+ memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+ Node** current_input = buffer + value_input_count;
+ if (has_effect) {
+ *current_input++ = effect_;
+ }
+ if (has_control) {
+ *current_input++ = graph()->start();
+ }
+ result = graph()->NewNode(op, input_count_with_deps, buffer);
+ if (has_effect) {
+ effect_ = result;
+ }
+ // This graph builder does not support control flow.
+ CHECK_EQ(0, op->ControlOutputCount());
+ }
+
+ return result;
+ }
+
virtual byte* Generate() {
- if (!Pipeline::SupportedBackend()) return NULL;
if (code_.is_null()) {
Zone* zone = graph()->zone();
CallDescriptor* desc =
Linkage::GetSimplifiedCDescriptor(zone, this->csig_);
code_ = Pipeline::GenerateCodeForTesting(main_isolate(), desc, graph());
+#ifdef ENABLE_DISASSEMBLER
+ if (!code_.is_null() && FLAG_print_opt_code) {
+ OFStream os(stdout);
+ code_.ToHandleChecked()->Disassemble("test code", os);
+ }
+#endif
}
return code_.ToHandleChecked()->entry();
}
@@ -92,6 +294,8 @@ class GraphBuilderTester : public HandleAndZoneScope,
size_t parameter_count() const { return this->csig_->parameter_count(); }
private:
+ Node* effect_;
+ Node* return_;
Node** parameters_;
MaybeHandle<Code> code_;
};
diff --git a/deps/v8/test/cctest/compiler/instruction-selector-tester.h b/deps/v8/test/cctest/compiler/instruction-selector-tester.h
deleted file mode 100644
index 3a28b2e5df..0000000000
--- a/deps/v8/test/cctest/compiler/instruction-selector-tester.h
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_
-#define V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_
-
-#include <deque>
-#include <set>
-
-#include "src/compiler/instruction-selector.h"
-#include "src/compiler/raw-machine-assembler.h"
-#include "src/ostreams.h"
-#include "test/cctest/cctest.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-typedef std::set<int> VirtualRegisterSet;
-
-enum InstructionSelectorTesterMode { kTargetMode, kInternalMode };
-
-class InstructionSelectorTester : public HandleAndZoneScope,
- public RawMachineAssembler {
- public:
- enum Mode { kTargetMode, kInternalMode };
-
- static const int kParameterCount = 3;
- static MachineType* BuildParameterArray(Zone* zone) {
- MachineType* array = zone->NewArray<MachineType>(kParameterCount);
- for (int i = 0; i < kParameterCount; ++i) {
- array[i] = kMachInt32;
- }
- return array;
- }
-
- InstructionSelectorTester()
- : RawMachineAssembler(
- new (main_zone()) Graph(main_zone()),
- new (main_zone()) MachineCallDescriptorBuilder(
- kMachInt32, kParameterCount, BuildParameterArray(main_zone())),
- kMachPtr) {}
-
- void SelectInstructions(CpuFeature feature) {
- SelectInstructions(InstructionSelector::Features(feature));
- }
-
- void SelectInstructions(CpuFeature feature1, CpuFeature feature2) {
- SelectInstructions(InstructionSelector::Features(feature1, feature2));
- }
-
- void SelectInstructions(Mode mode = kTargetMode) {
- SelectInstructions(InstructionSelector::Features(), mode);
- }
-
- void SelectInstructions(InstructionSelector::Features features,
- Mode mode = kTargetMode) {
- OFStream out(stdout);
- Schedule* schedule = Export();
- CHECK_NE(0, graph()->NodeCount());
- CompilationInfo info(main_isolate(), main_zone());
- Linkage linkage(&info, call_descriptor());
- InstructionSequence sequence(&linkage, graph(), schedule);
- SourcePositionTable source_positions(graph());
- InstructionSelector selector(&sequence, &source_positions, features);
- selector.SelectInstructions();
- out << "--- Code sequence after instruction selection --- " << endl
- << sequence;
- for (InstructionSequence::const_iterator i = sequence.begin();
- i != sequence.end(); ++i) {
- Instruction* instr = *i;
- if (instr->opcode() < 0) continue;
- if (mode == kTargetMode) {
- switch (ArchOpcodeField::decode(instr->opcode())) {
-#define CASE(Name) \
- case k##Name: \
- break;
- TARGET_ARCH_OPCODE_LIST(CASE)
-#undef CASE
- default:
- continue;
- }
- }
- code.push_back(instr);
- }
- for (int vreg = 0; vreg < sequence.VirtualRegisterCount(); ++vreg) {
- if (sequence.IsDouble(vreg)) {
- CHECK(!sequence.IsReference(vreg));
- doubles.insert(vreg);
- }
- if (sequence.IsReference(vreg)) {
- CHECK(!sequence.IsDouble(vreg));
- references.insert(vreg);
- }
- }
- immediates.assign(sequence.immediates().begin(),
- sequence.immediates().end());
- }
-
- int32_t ToInt32(const InstructionOperand* operand) const {
- size_t i = operand->index();
- CHECK(i < immediates.size());
- CHECK_EQ(InstructionOperand::IMMEDIATE, operand->kind());
- return immediates[i].ToInt32();
- }
-
- std::deque<Instruction*> code;
- VirtualRegisterSet doubles;
- VirtualRegisterSet references;
- std::deque<Constant> immediates;
-};
-
-
-static inline void CheckSameVreg(InstructionOperand* exp,
- InstructionOperand* val) {
- CHECK_EQ(InstructionOperand::UNALLOCATED, exp->kind());
- CHECK_EQ(InstructionOperand::UNALLOCATED, val->kind());
- CHECK_EQ(UnallocatedOperand::cast(exp)->virtual_register(),
- UnallocatedOperand::cast(val)->virtual_register());
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_
diff --git a/deps/v8/test/cctest/compiler/simplified-graph-builder.cc b/deps/v8/test/cctest/compiler/simplified-graph-builder.cc
deleted file mode 100644
index 4d57719eff..0000000000
--- a/deps/v8/test/cctest/compiler/simplified-graph-builder.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "test/cctest/compiler/simplified-graph-builder.h"
-
-#include "src/compiler/operator-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-SimplifiedGraphBuilder::SimplifiedGraphBuilder(
- Isolate* isolate, Graph* graph, CommonOperatorBuilder* common,
- MachineOperatorBuilder* machine, SimplifiedOperatorBuilder* simplified)
- : GraphBuilder(isolate, graph),
- effect_(NULL),
- return_(NULL),
- common_(common),
- machine_(machine),
- simplified_(simplified) {}
-
-
-void SimplifiedGraphBuilder::Begin(int num_parameters) {
- DCHECK(graph()->start() == NULL);
- Node* start = graph()->NewNode(common()->Start(num_parameters + 3));
- graph()->SetStart(start);
- effect_ = start;
-}
-
-
-void SimplifiedGraphBuilder::Return(Node* value) {
- return_ =
- graph()->NewNode(common()->Return(), value, effect_, graph()->start());
- effect_ = NULL;
-}
-
-
-void SimplifiedGraphBuilder::End() {
- Node* end = graph()->NewNode(common()->End(1), return_);
- graph()->SetEnd(end);
-}
-
-
-Node* SimplifiedGraphBuilder::MakeNode(const Operator* op,
- int value_input_count,
- Node** value_inputs, bool incomplete) {
- DCHECK(op->ValueInputCount() == value_input_count);
-
- DCHECK(!OperatorProperties::HasContextInput(op));
- DCHECK_EQ(0, OperatorProperties::GetFrameStateInputCount(op));
- bool has_control = op->ControlInputCount() == 1;
- bool has_effect = op->EffectInputCount() == 1;
-
- DCHECK(op->ControlInputCount() < 2);
- DCHECK(op->EffectInputCount() < 2);
-
- Node* result = NULL;
- if (!has_control && !has_effect) {
- result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
- } else {
- int input_count_with_deps = value_input_count;
- if (has_control) ++input_count_with_deps;
- if (has_effect) ++input_count_with_deps;
- Node** buffer = zone()->NewArray<Node*>(input_count_with_deps);
- memcpy(buffer, value_inputs, kPointerSize * value_input_count);
- Node** current_input = buffer + value_input_count;
- if (has_effect) {
- *current_input++ = effect_;
- }
- if (has_control) {
- *current_input++ = graph()->start();
- }
- result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
- if (has_effect) {
- effect_ = result;
- }
- // This graph builder does not support control flow.
- CHECK_EQ(0, op->ControlOutputCount());
- }
-
- return result;
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/simplified-graph-builder.h b/deps/v8/test/cctest/compiler/simplified-graph-builder.h
deleted file mode 100644
index 50c51d5ed8..0000000000
--- a/deps/v8/test/cctest/compiler/simplified-graph-builder.h
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_
-#define V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-builder.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/simplified-operator.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/compiler/call-tester.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class SimplifiedGraphBuilder : public GraphBuilder {
- public:
- SimplifiedGraphBuilder(Isolate* isolate, Graph* graph,
- CommonOperatorBuilder* common,
- MachineOperatorBuilder* machine,
- SimplifiedOperatorBuilder* simplified);
- virtual ~SimplifiedGraphBuilder() {}
-
- Zone* zone() const { return graph()->zone(); }
- CommonOperatorBuilder* common() const { return common_; }
- MachineOperatorBuilder* machine() const { return machine_; }
- SimplifiedOperatorBuilder* simplified() const { return simplified_; }
-
- // Initialize graph and builder.
- void Begin(int num_parameters);
-
- void Return(Node* value);
-
- // Close the graph.
- void End();
-
- Node* PointerConstant(void* value) {
- intptr_t intptr_value = reinterpret_cast<intptr_t>(value);
- return kPointerSize == 8 ? NewNode(common()->Int64Constant(intptr_value))
- : Int32Constant(static_cast<int>(intptr_value));
- }
- Node* Int32Constant(int32_t value) {
- return NewNode(common()->Int32Constant(value));
- }
- Node* HeapConstant(Handle<HeapObject> object) {
- Unique<HeapObject> val = Unique<HeapObject>::CreateUninitialized(object);
- return NewNode(common()->HeapConstant(val));
- }
-
- Node* BooleanNot(Node* a) { return NewNode(simplified()->BooleanNot(), a); }
-
- Node* NumberEqual(Node* a, Node* b) {
- return NewNode(simplified()->NumberEqual(), a, b);
- }
- Node* NumberLessThan(Node* a, Node* b) {
- return NewNode(simplified()->NumberLessThan(), a, b);
- }
- Node* NumberLessThanOrEqual(Node* a, Node* b) {
- return NewNode(simplified()->NumberLessThanOrEqual(), a, b);
- }
- Node* NumberAdd(Node* a, Node* b) {
- return NewNode(simplified()->NumberAdd(), a, b);
- }
- Node* NumberSubtract(Node* a, Node* b) {
- return NewNode(simplified()->NumberSubtract(), a, b);
- }
- Node* NumberMultiply(Node* a, Node* b) {
- return NewNode(simplified()->NumberMultiply(), a, b);
- }
- Node* NumberDivide(Node* a, Node* b) {
- return NewNode(simplified()->NumberDivide(), a, b);
- }
- Node* NumberModulus(Node* a, Node* b) {
- return NewNode(simplified()->NumberModulus(), a, b);
- }
- Node* NumberToInt32(Node* a) {
- return NewNode(simplified()->NumberToInt32(), a);
- }
- Node* NumberToUint32(Node* a) {
- return NewNode(simplified()->NumberToUint32(), a);
- }
-
- Node* StringEqual(Node* a, Node* b) {
- return NewNode(simplified()->StringEqual(), a, b);
- }
- Node* StringLessThan(Node* a, Node* b) {
- return NewNode(simplified()->StringLessThan(), a, b);
- }
- Node* StringLessThanOrEqual(Node* a, Node* b) {
- return NewNode(simplified()->StringLessThanOrEqual(), a, b);
- }
-
- Node* ChangeTaggedToInt32(Node* a) {
- return NewNode(simplified()->ChangeTaggedToInt32(), a);
- }
- Node* ChangeTaggedToUint32(Node* a) {
- return NewNode(simplified()->ChangeTaggedToUint32(), a);
- }
- Node* ChangeTaggedToFloat64(Node* a) {
- return NewNode(simplified()->ChangeTaggedToFloat64(), a);
- }
- Node* ChangeInt32ToTagged(Node* a) {
- return NewNode(simplified()->ChangeInt32ToTagged(), a);
- }
- Node* ChangeUint32ToTagged(Node* a) {
- return NewNode(simplified()->ChangeUint32ToTagged(), a);
- }
- Node* ChangeFloat64ToTagged(Node* a) {
- return NewNode(simplified()->ChangeFloat64ToTagged(), a);
- }
- Node* ChangeBoolToBit(Node* a) {
- return NewNode(simplified()->ChangeBoolToBit(), a);
- }
- Node* ChangeBitToBool(Node* a) {
- return NewNode(simplified()->ChangeBitToBool(), a);
- }
-
- Node* LoadField(const FieldAccess& access, Node* object) {
- return NewNode(simplified()->LoadField(access), object);
- }
- Node* StoreField(const FieldAccess& access, Node* object, Node* value) {
- return NewNode(simplified()->StoreField(access), object, value);
- }
- Node* LoadElement(const ElementAccess& access, Node* object, Node* index) {
- return NewNode(simplified()->LoadElement(access), object, index);
- }
- Node* StoreElement(const ElementAccess& access, Node* object, Node* index,
- Node* value) {
- return NewNode(simplified()->StoreElement(access), object, index, value);
- }
-
- protected:
- virtual Node* MakeNode(const Operator* op, int value_input_count,
- Node** value_inputs, bool incomplete) final;
-
- private:
- Node* effect_;
- Node* return_;
- CommonOperatorBuilder* common_;
- MachineOperatorBuilder* machine_;
- SimplifiedOperatorBuilder* simplified_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_
diff --git a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
index fa4da9a736..7d7690bad6 100644
--- a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
+++ b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
@@ -8,8 +8,6 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
-#if V8_TURBOFAN_TARGET
-
using namespace v8::internal;
using namespace v8::internal::compiler;
@@ -110,5 +108,3 @@ TEST(ProfileLoop) {
m.Expect(arraysize(expected), expected);
}
}
-
-#endif // V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-branch-combine.cc b/deps/v8/test/cctest/compiler/test-branch-combine.cc
index 58202a61b0..06d380a6a2 100644
--- a/deps/v8/test/cctest/compiler/test-branch-combine.cc
+++ b/deps/v8/test/cctest/compiler/test-branch-combine.cc
@@ -8,8 +8,6 @@
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
-#if V8_TURBOFAN_TARGET
-
using namespace v8::internal;
using namespace v8::internal::compiler;
@@ -459,4 +457,3 @@ TEST(BranchCombineFloat64Compares) {
}
}
}
-#endif // V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-changes-lowering.cc b/deps/v8/test/cctest/compiler/test-changes-lowering.cc
index 04b5b9176b..b6b48bdac4 100644
--- a/deps/v8/test/cctest/compiler/test-changes-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-changes-lowering.cc
@@ -147,7 +147,6 @@ TEST(RunChangeTaggedToInt32) {
ChangesLoweringTester<int32_t> t(kMachAnyTagged);
t.BuildAndLower(t.simplified()->ChangeTaggedToInt32());
- if (Pipeline::SupportedTarget()) {
FOR_INT32_INPUTS(i) {
int32_t input = *i;
@@ -167,7 +166,6 @@ TEST(RunChangeTaggedToInt32) {
int32_t result = t.Call(*number);
CHECK_EQ(input, result);
}
- }
}
}
@@ -177,7 +175,6 @@ TEST(RunChangeTaggedToUint32) {
ChangesLoweringTester<uint32_t> t(kMachAnyTagged);
t.BuildAndLower(t.simplified()->ChangeTaggedToUint32());
- if (Pipeline::SupportedTarget()) {
FOR_UINT32_INPUTS(i) {
uint32_t input = *i;
@@ -198,7 +195,6 @@ TEST(RunChangeTaggedToUint32) {
CHECK_EQ(static_cast<int32_t>(input), static_cast<int32_t>(result));
}
}
- }
}
@@ -211,7 +207,7 @@ TEST(RunChangeTaggedToFloat64) {
t.machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
&result);
- if (Pipeline::SupportedTarget()) {
+ {
FOR_INT32_INPUTS(i) {
int32_t input = *i;
@@ -234,7 +230,7 @@ TEST(RunChangeTaggedToFloat64) {
}
}
- if (Pipeline::SupportedTarget()) {
+ {
FOR_FLOAT64_INPUTS(i) {
double input = *i;
{
@@ -257,13 +253,13 @@ TEST(RunChangeBoolToBit) {
ChangesLoweringTester<int32_t> t(kMachAnyTagged);
t.BuildAndLower(t.simplified()->ChangeBoolToBit());
- if (Pipeline::SupportedTarget()) {
+ {
Object* true_obj = t.heap()->true_value();
int32_t result = t.Call(true_obj);
CHECK_EQ(1, result);
}
- if (Pipeline::SupportedTarget()) {
+ {
Object* false_obj = t.heap()->false_value();
int32_t result = t.Call(false_obj);
CHECK_EQ(0, result);
@@ -275,122 +271,15 @@ TEST(RunChangeBitToBool) {
ChangesLoweringTester<Object*> t(kMachInt32);
t.BuildAndLower(t.simplified()->ChangeBitToBool());
- if (Pipeline::SupportedTarget()) {
+ {
Object* result = t.Call(1);
Object* true_obj = t.heap()->true_value();
CHECK_EQ(true_obj, result);
}
- if (Pipeline::SupportedTarget()) {
+ {
Object* result = t.Call(0);
Object* false_obj = t.heap()->false_value();
CHECK_EQ(false_obj, result);
}
}
-
-
-#if V8_TURBOFAN_BACKEND
-// TODO(titzer): disabled on ARM
-
-TEST(RunChangeInt32ToTaggedSmi) {
- ChangesLoweringTester<Object*> t;
- int32_t input;
- t.BuildLoadAndLower(t.simplified()->ChangeInt32ToTagged(),
- t.machine()->Load(kMachInt32), &input);
-
- if (Pipeline::SupportedTarget()) {
- FOR_INT32_INPUTS(i) {
- input = *i;
- if (!Smi::IsValid(input)) continue;
- Object* result = t.Call();
- t.CheckNumber(static_cast<double>(input), result);
- }
- }
-}
-
-
-TEST(RunChangeUint32ToTaggedSmi) {
- ChangesLoweringTester<Object*> t;
- uint32_t input;
- t.BuildLoadAndLower(t.simplified()->ChangeUint32ToTagged(),
- t.machine()->Load(kMachUint32), &input);
-
- if (Pipeline::SupportedTarget()) {
- FOR_UINT32_INPUTS(i) {
- input = *i;
- if (input > static_cast<uint32_t>(Smi::kMaxValue)) continue;
- Object* result = t.Call();
- double expected = static_cast<double>(input);
- t.CheckNumber(expected, result);
- }
- }
-}
-
-
-TEST(RunChangeInt32ToTagged) {
- ChangesLoweringTester<Object*> t;
- int32_t input;
- t.BuildLoadAndLower(t.simplified()->ChangeInt32ToTagged(),
- t.machine()->Load(kMachInt32), &input);
-
- if (Pipeline::SupportedTarget()) {
- for (int m = 0; m < 3; m++) { // Try 3 GC modes.
- FOR_INT32_INPUTS(i) {
- if (m == 0) CcTest::heap()->EnableInlineAllocation();
- if (m == 1) CcTest::heap()->DisableInlineAllocation();
- if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
-
- input = *i;
- Object* result = t.CallWithPotentialGC<Object>();
- t.CheckNumber(static_cast<double>(input), result);
- }
- }
- }
-}
-
-
-TEST(RunChangeUint32ToTagged) {
- ChangesLoweringTester<Object*> t;
- uint32_t input;
- t.BuildLoadAndLower(t.simplified()->ChangeUint32ToTagged(),
- t.machine()->Load(kMachUint32), &input);
-
- if (Pipeline::SupportedTarget()) {
- for (int m = 0; m < 3; m++) { // Try 3 GC modes.
- FOR_UINT32_INPUTS(i) {
- if (m == 0) CcTest::heap()->EnableInlineAllocation();
- if (m == 1) CcTest::heap()->DisableInlineAllocation();
- if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
-
- input = *i;
- Object* result = t.CallWithPotentialGC<Object>();
- double expected = static_cast<double>(input);
- t.CheckNumber(expected, result);
- }
- }
- }
-}
-
-
-TEST(RunChangeFloat64ToTagged) {
- ChangesLoweringTester<Object*> t;
- double input;
- t.BuildLoadAndLower(t.simplified()->ChangeFloat64ToTagged(),
- t.machine()->Load(kMachFloat64), &input);
-
- if (Pipeline::SupportedTarget()) {
- for (int m = 0; m < 3; m++) { // Try 3 GC modes.
- FOR_FLOAT64_INPUTS(i) {
- if (m == 0) CcTest::heap()->EnableInlineAllocation();
- if (m == 1) CcTest::heap()->DisableInlineAllocation();
- if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
-
- input = *i;
- Object* result = t.CallWithPotentialGC<Object>();
- t.CheckNumber(input, result);
- }
- }
- }
-}
-
-#endif // V8_TURBOFAN_BACKEND
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 252c43133e..29da5890ea 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -19,8 +19,6 @@
#include "src/compiler/schedule.h"
#include "test/cctest/cctest.h"
-#if V8_TURBOFAN_TARGET
-
using namespace v8::internal;
using namespace v8::internal::compiler;
@@ -80,7 +78,7 @@ TEST(TestLinkageCodeStubIncoming) {
CompilationInfo info(&stub, isolate, &zone);
CallDescriptor* descriptor = Linkage::ComputeIncoming(&zone, &info);
CHECK(descriptor);
- CHECK_EQ(1, static_cast<int>(descriptor->JSParameterCount()));
+ CHECK_EQ(0, static_cast<int>(descriptor->StackParameterCount()));
CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount()));
CHECK_EQ(Operator::kNoProperties, descriptor->properties());
CHECK_EQ(false, descriptor->IsJSFunctionCall());
@@ -113,5 +111,3 @@ TEST(TestLinkageRuntimeCall) {
TEST(TestLinkageStubCall) {
// TODO(titzer): test linkage creation for outgoing stub calls.
}
-
-#endif // V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
index ad05273995..b59f181f5e 100644
--- a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
@@ -36,7 +36,7 @@ struct TestHelper : public HandleAndZoneScope {
CHECK(Rewriter::Rewrite(&parse_info));
CHECK(Scope::Analyze(&parse_info));
- Scope* scope = info.function()->scope();
+ Scope* scope = info.literal()->scope();
AstValueFactory* factory = parse_info.ast_value_factory();
CHECK(scope);
diff --git a/deps/v8/test/cctest/compiler/test-operator.cc b/deps/v8/test/cctest/compiler/test-operator.cc
index e635da797d..0ac33637da 100644
--- a/deps/v8/test/cctest/compiler/test-operator.cc
+++ b/deps/v8/test/cctest/compiler/test-operator.cc
@@ -69,10 +69,10 @@ TEST(TestOperator_Equals) {
}
-static SmartArrayPointer<const char> OperatorToString(Operator* op) {
+static v8::base::SmartArrayPointer<const char> OperatorToString(Operator* op) {
std::ostringstream os;
os << *op;
- return SmartArrayPointer<const char>(StrDup(os.str().c_str()));
+ return v8::base::SmartArrayPointer<const char>(StrDup(os.str().c_str()));
}
diff --git a/deps/v8/test/cctest/compiler/test-pipeline.cc b/deps/v8/test/cctest/compiler/test-pipeline.cc
index 84550d502a..8996718644 100644
--- a/deps/v8/test/cctest/compiler/test-pipeline.cc
+++ b/deps/v8/test/cctest/compiler/test-pipeline.cc
@@ -21,13 +21,8 @@ static void RunPipeline(Zone* zone, const char* source) {
CompilationInfo info(&parse_info);
Pipeline pipeline(&info);
-#if V8_TURBOFAN_TARGET
Handle<Code> code = pipeline.GenerateCode();
- CHECK(Pipeline::SupportedTarget());
CHECK(!code.is_null());
-#else
- USE(pipeline);
-#endif
}
diff --git a/deps/v8/test/cctest/compiler/test-run-deopt.cc b/deps/v8/test/cctest/compiler/test-run-deopt.cc
index d895924324..aedf668f44 100644
--- a/deps/v8/test/cctest/compiler/test-run-deopt.cc
+++ b/deps/v8/test/cctest/compiler/test-run-deopt.cc
@@ -4,14 +4,13 @@
#include "src/v8.h"
+#include "src/frames-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/function-tester.h"
using namespace v8::internal;
using namespace v8::internal::compiler;
-#if V8_TURBOFAN_TARGET
-
static void IsOptimized(const v8::FunctionCallbackInfo<v8::Value>& args) {
JavaScriptFrameIterator it(CcTest::i_isolate());
JavaScriptFrame* frame = it.frame();
@@ -103,7 +102,6 @@ TEST(DeoptExceptionHandlerFinally) {
#endif
}
-#endif
TEST(DeoptTrivial) {
FLAG_allow_natives_syntax = true;
diff --git a/deps/v8/test/cctest/compiler/test-run-inlining.cc b/deps/v8/test/cctest/compiler/test-run-inlining.cc
index 7f8ae25619..1b2559fc5f 100644
--- a/deps/v8/test/cctest/compiler/test-run-inlining.cc
+++ b/deps/v8/test/cctest/compiler/test-run-inlining.cc
@@ -4,10 +4,9 @@
#include "src/v8.h"
+#include "src/frames-inl.h"
#include "test/cctest/compiler/function-tester.h"
-#if V8_TURBOFAN_TARGET
-
using namespace v8::internal;
using namespace v8::internal::compiler;
@@ -574,5 +573,3 @@ TEST(InlineMutuallyRecursive) {
InstallAssertInlineCountHelper(CcTest::isolate());
T.CheckCall(T.Val(42), T.Val(1));
}
-
-#endif // V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-run-jscalls.cc b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
index 8de2d7a214..893c2fa460 100644
--- a/deps/v8/test/cctest/compiler/test-run-jscalls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
@@ -132,20 +132,6 @@ TEST(ConstructorCall) {
// TODO(titzer): factor these out into test-runtime-calls.cc
-TEST(RuntimeCallCPP1) {
- FLAG_allow_natives_syntax = true;
- FunctionTester T("(function(a) { return %ToBool(a); })");
-
- T.CheckCall(T.true_value(), T.Val(23), T.undefined());
- T.CheckCall(T.true_value(), T.Val(4.2), T.undefined());
- T.CheckCall(T.true_value(), T.Val("str"), T.undefined());
- T.CheckCall(T.true_value(), T.true_value(), T.undefined());
- T.CheckCall(T.false_value(), T.false_value(), T.undefined());
- T.CheckCall(T.false_value(), T.undefined(), T.undefined());
- T.CheckCall(T.false_value(), T.Val(0.0), T.undefined());
-}
-
-
TEST(RuntimeCallCPP2) {
FLAG_allow_natives_syntax = true;
FunctionTester T("(function(a,b) { return %NumberAdd(a, b); })");
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index b1fc36968f..8b14dab46c 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -13,8 +13,6 @@
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
-#if V8_TURBOFAN_TARGET
-
using namespace v8::base;
using namespace v8::internal;
using namespace v8::internal::compiler;
@@ -82,7 +80,14 @@ TEST(CodeGenInt32Binop) {
}
-#if V8_TURBOFAN_BACKEND_64
+TEST(CodeGenNop) {
+ RawMachineAssemblerTester<void> m;
+ m.Return(m.Int32Constant(0));
+ m.GenerateCode();
+}
+
+
+#if V8_TARGET_ARCH_64_BIT
static Node* Int64Input(RawMachineAssemblerTester<int64_t>* m, int index) {
switch (index) {
case 0:
@@ -136,7 +141,7 @@ TEST(CodeGenInt64Binop) {
// TODO(titzer): add tests that run 64-bit integer operations.
-#endif // V8_TURBOFAN_BACKEND_64
+#endif // V8_TARGET_ARCH_64_BIT
TEST(RunGoto) {
@@ -5274,5 +5279,3 @@ TEST(RunCallCFunction8) {
}
#endif // USE_SIMULATOR
-
-#endif // V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
new file mode 100644
index 0000000000..2e255c7729
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -0,0 +1,985 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-type.h"
+#include "src/compiler/raw-machine-assembler.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef RawMachineAssembler::Label MLabel;
+
+#if V8_TARGET_ARCH_ARM64
+// TODO(titzer): fix native stack parameters on arm64
+#define DISABLE_NATIVE_STACK_PARAMS true
+#else
+#define DISABLE_NATIVE_STACK_PARAMS false
+#endif
+
+namespace {
+typedef float float32;
+typedef double float64;
+
+// Picks a representative pair of integers from the given range.
+// If there are fewer than {max_pairs} possible pairs, do them all; otherwise try
+// to select a representative set.
+class Pairs {
+ public:
+ Pairs(int max_pairs, int range)
+ : range_(range),
+ max_pairs_(std::min(max_pairs, range_ * range_)),
+ counter_(0) {}
+
+ bool More() { return counter_ < max_pairs_; }
+
+ void Next(int* r0, int* r1, bool same_is_ok) {
+ do {
+ // Find the next pair.
+ if (exhaustive()) {
+ *r0 = counter_ % range_;
+ *r1 = counter_ / range_;
+ } else {
+ // Try each integer at least once for both r0 and r1.
+ int index = counter_ / 2;
+ if (counter_ & 1) {
+ *r0 = index % range_;
+ *r1 = index / range_;
+ } else {
+ *r1 = index % range_;
+ *r0 = index / range_;
+ }
+ }
+ counter_++;
+ if (same_is_ok) break;
+ if (*r0 == *r1) {
+ if (counter_ >= max_pairs_) {
+ // For the last hurrah, reg#0 with reg#n-1
+ *r0 = 0;
+ *r1 = range_ - 1;
+ break;
+ }
+ }
+ } while (true);
+
+ DCHECK(*r0 >= 0 && *r0 < range_);
+ DCHECK(*r1 >= 0 && *r1 < range_);
+ }
+
+ private:
+ int range_;
+ int max_pairs_;
+ int counter_;
+ bool exhaustive() { return max_pairs_ == (range_ * range_); }
+};
+
+
+// Pairs of general purpose registers.
+class RegisterPairs : public Pairs {
+ public:
+ RegisterPairs() : Pairs(100, Register::kMaxNumAllocatableRegisters) {}
+};
+
+
+// Pairs of float32 (single-precision) registers.
+class Float32RegisterPairs : public Pairs {
+ public:
+ Float32RegisterPairs()
+ : Pairs(100, DoubleRegister::NumAllocatableAliasedRegisters()) {}
+};
+
+
+// Pairs of double registers.
+class Float64RegisterPairs : public Pairs {
+ public:
+ Float64RegisterPairs()
+ : Pairs(100, DoubleRegister::NumAllocatableAliasedRegisters()) {}
+};
+
+
+// Helper for allocating either a GP or FP reg, or the next stack slot.
+struct Allocator {
+ Allocator(int* gp, int gpc, int* fp, int fpc)
+ : gp_count(gpc),
+ gp_offset(0),
+ gp_regs(gp),
+ fp_count(fpc),
+ fp_offset(0),
+ fp_regs(fp),
+ stack_offset(0) {}
+
+ int gp_count;
+ int gp_offset;
+ int* gp_regs;
+
+ int fp_count;
+ int fp_offset;
+ int* fp_regs;
+
+ int stack_offset;
+
+ LinkageLocation Next(MachineType type) {
+ if (IsFloatingPoint(type)) {
+ // Allocate a floating point register/stack location.
+ if (fp_offset < fp_count) {
+ return LinkageLocation::ForRegister(fp_regs[fp_offset++]);
+ } else {
+ int offset = -1 - stack_offset;
+ stack_offset += StackWords(type);
+ return LinkageLocation::ForCallerFrameSlot(offset);
+ }
+ } else {
+ // Allocate a general purpose register/stack location.
+ if (gp_offset < gp_count) {
+ return LinkageLocation::ForRegister(gp_regs[gp_offset++]);
+ } else {
+ int offset = -1 - stack_offset;
+ stack_offset += StackWords(type);
+ return LinkageLocation::ForCallerFrameSlot(offset);
+ }
+ }
+ }
+ bool IsFloatingPoint(MachineType type) {
+ return RepresentationOf(type) == kRepFloat32 ||
+ RepresentationOf(type) == kRepFloat64;
+ }
+ int StackWords(MachineType type) {
+ // TODO(titzer): hack. float32 occupies 8 bytes on stack.
+ int size = (RepresentationOf(type) == kRepFloat32 ||
+ RepresentationOf(type) == kRepFloat64)
+ ? kDoubleSize
+ : ElementSizeOf(type);
+ return size <= kPointerSize ? 1 : size / kPointerSize;
+ }
+ void Reset() {
+ fp_offset = 0;
+ gp_offset = 0;
+ stack_offset = 0;
+ }
+};
+
+
+class RegisterConfig {
+ public:
+ RegisterConfig(Allocator& p, Allocator& r) : params(p), rets(r) {}
+
+ CallDescriptor* Create(Zone* zone, MachineSignature* msig) {
+ rets.Reset();
+ params.Reset();
+
+ LocationSignature::Builder locations(zone, msig->return_count(),
+ msig->parameter_count());
+ // Add return location(s).
+ const int return_count = static_cast<int>(locations.return_count_);
+ for (int i = 0; i < return_count; i++) {
+ locations.AddReturn(rets.Next(msig->GetReturn(i)));
+ }
+
+ // Add register and/or stack parameter(s).
+ const int parameter_count = static_cast<int>(msig->parameter_count());
+ for (int i = 0; i < parameter_count; i++) {
+ locations.AddParam(params.Next(msig->GetParam(i)));
+ }
+
+ const RegList kCalleeSaveRegisters = 0;
+ const RegList kCalleeSaveFPRegisters = 0;
+
+ MachineType target_type = compiler::kMachAnyTagged;
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ int stack_param_count = params.stack_offset;
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallCodeObject, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ msig, // machine_sig
+ locations.Build(), // location_sig
+ stack_param_count, // stack_parameter_count
+ compiler::Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ CallDescriptor::kNoFlags, // flags
+ "c-call");
+ }
+
+ private:
+ Allocator& params;
+ Allocator& rets;
+};
+
+const int kMaxParamCount = 64;
+
+MachineType kIntTypes[kMaxParamCount + 1] = {
+ kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+ kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+ kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+ kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+ kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+ kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+ kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+ kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+ kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+ kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+ kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32};
+
+
+// For making uniform int32 signatures shorter.
+class Int32Signature : public MachineSignature {
+ public:
+ explicit Int32Signature(int param_count)
+ : MachineSignature(1, param_count, kIntTypes) {
+ CHECK(param_count <= kMaxParamCount);
+ }
+};
+
+
+Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph,
+ Schedule* schedule = nullptr) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(isolate, desc, graph, schedule);
+ CHECK(!code.is_null());
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_opt_code) {
+ OFStream os(stdout);
+ code->Disassemble(name, os);
+ }
+#endif
+ return code;
+}
+
+
+Handle<Code> WrapWithCFunction(Handle<Code> inner, CallDescriptor* desc) {
+ Zone zone;
+ MachineSignature* msig =
+ const_cast<MachineSignature*>(desc->GetMachineSignature());
+ int param_count = static_cast<int>(msig->parameter_count());
+ GraphAndBuilders caller(&zone);
+ {
+ GraphAndBuilders& b = caller;
+ Node* start = b.graph()->NewNode(b.common()->Start(param_count + 3));
+ b.graph()->SetStart(start);
+ Unique<HeapObject> unique = Unique<HeapObject>::CreateUninitialized(inner);
+ Node* target = b.graph()->NewNode(b.common()->HeapConstant(unique));
+
+ // Add arguments to the call.
+ Node** args = zone.NewArray<Node*>(param_count + 3);
+ int index = 0;
+ args[index++] = target;
+ for (int i = 0; i < param_count; i++) {
+ args[index] = b.graph()->NewNode(b.common()->Parameter(i), start);
+ index++;
+ }
+ args[index++] = start; // effect.
+ args[index++] = start; // control.
+
+ // Build the call and return nodes.
+ Node* call =
+ b.graph()->NewNode(b.common()->Call(desc), param_count + 3, args);
+ Node* ret = b.graph()->NewNode(b.common()->Return(), call, call, start);
+ b.graph()->SetEnd(ret);
+ }
+
+ CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, msig);
+
+ return CompileGraph("wrapper", cdesc, caller.graph());
+}
+
+
+template <typename CType>
+class ArgsBuffer {
+ public:
+ static const int kMaxParamCount = 64;
+
+ explicit ArgsBuffer(int count, int seed = 1) : count_(count), seed_(seed) {
+ // initialize the buffer with "seed 0"
+ seed_ = 0;
+ Mutate();
+ seed_ = seed;
+ }
+
+ class Sig : public MachineSignature {
+ public:
+ explicit Sig(int param_count)
+ : MachineSignature(1, param_count, MachTypes()) {
+ CHECK(param_count <= kMaxParamCount);
+ }
+ };
+
+ static MachineType* MachTypes() {
+ MachineType t = MachineTypeForC<CType>();
+ static MachineType kTypes[kMaxParamCount + 1] = {
+ t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t,
+ t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t,
+ t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t};
+ return kTypes;
+ }
+
+ Node* MakeConstant(RawMachineAssembler& raw, int32_t value) {
+ return raw.Int32Constant(value);
+ }
+
+ Node* MakeConstant(RawMachineAssembler& raw, int64_t value) {
+ return raw.Int64Constant(value);
+ }
+
+ Node* MakeConstant(RawMachineAssembler& raw, float32 value) {
+ return raw.Float32Constant(value);
+ }
+
+ Node* MakeConstant(RawMachineAssembler& raw, float64 value) {
+ return raw.Float64Constant(value);
+ }
+
+ Node* LoadInput(RawMachineAssembler& raw, Node* base, int index) {
+ Node* offset = raw.Int32Constant(index * sizeof(CType));
+ return raw.Load(MachineTypeForC<CType>(), base, offset);
+ }
+
+ Node* StoreOutput(RawMachineAssembler& raw, Node* value) {
+ Node* base = raw.PointerConstant(&output);
+ Node* offset = raw.Int32Constant(0);
+ return raw.Store(MachineTypeForC<CType>(), base, offset, value);
+ }
+
+ // Computes the next set of inputs by updating the {input} array.
+ void Mutate();
+
+ void Reset() { memset(input, 0, sizeof(input)); }
+
+ int count_;
+ int seed_;
+ CType input[kMaxParamCount];
+ CType output;
+};
+
+
+template <>
+void ArgsBuffer<int32_t>::Mutate() {
+ uint32_t base = 1111111111u * seed_;
+ for (int j = 0; j < count_ && j < kMaxParamCount; j++) {
+ input[j] = static_cast<int32_t>(256 + base + j + seed_ * 13);
+ }
+ output = -1;
+ seed_++;
+}
+
+
+template <>
+void ArgsBuffer<int64_t>::Mutate() {
+ uint64_t base = 11111111111111111ull * seed_;
+ for (int j = 0; j < count_ && j < kMaxParamCount; j++) {
+ input[j] = static_cast<int64_t>(256 + base + j + seed_ * 13);
+ }
+ output = -1;
+ seed_++;
+}
+
+
+template <>
+void ArgsBuffer<float32>::Mutate() {
+ float64 base = -33.25 * seed_;
+ for (int j = 0; j < count_ && j < kMaxParamCount; j++) {
+ input[j] = 256 + base + j + seed_ * 13;
+ }
+ output = std::numeric_limits<float32>::quiet_NaN();
+ seed_++;
+}
+
+
+template <>
+void ArgsBuffer<float64>::Mutate() {
+ float64 base = -111.25 * seed_;
+ for (int j = 0; j < count_ && j < kMaxParamCount; j++) {
+ input[j] = 256 + base + j + seed_ * 13;
+ }
+ output = std::numeric_limits<float64>::quiet_NaN();
+ seed_++;
+}
+
+
+int ParamCount(CallDescriptor* desc) {
+ return static_cast<int>(desc->GetMachineSignature()->parameter_count());
+}
+
+
+template <typename CType>
+class Computer {
+ public:
+ static void Run(CallDescriptor* desc,
+ void (*build)(CallDescriptor*, RawMachineAssembler&),
+ CType (*compute)(CallDescriptor*, CType* inputs),
+ int seed = 1) {
+ int num_params = ParamCount(desc);
+ CHECK_LE(num_params, kMaxParamCount);
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ Handle<Code> inner = Handle<Code>::null();
+ {
+ // Build the graph for the computation.
+ Zone zone;
+ Graph graph(&zone);
+ RawMachineAssembler raw(isolate, &graph, desc);
+ build(desc, raw);
+ inner = CompileGraph("Compute", desc, &graph, raw.Export());
+ }
+
+ CSignature0<int32_t> csig;
+ ArgsBuffer<CType> io(num_params, seed);
+
+ {
+ // constant mode.
+ Handle<Code> wrapper = Handle<Code>::null();
+ {
+ // Wrap the above code with a callable function that passes constants.
+ Zone zone;
+ Graph graph(&zone);
+ CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
+ RawMachineAssembler raw(isolate, &graph, cdesc);
+ Unique<HeapObject> unique =
+ Unique<HeapObject>::CreateUninitialized(inner);
+ Node* target = raw.HeapConstant(unique);
+ Node** args = zone.NewArray<Node*>(num_params);
+ for (int i = 0; i < num_params; i++) {
+ args[i] = io.MakeConstant(raw, io.input[i]);
+ }
+
+ Node* call = raw.CallN(desc, target, args);
+ Node* store = io.StoreOutput(raw, call);
+ USE(store);
+ raw.Return(raw.Int32Constant(seed));
+ wrapper =
+ CompileGraph("Compute-wrapper-const", cdesc, &graph, raw.Export());
+ }
+
+ CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
+
+ // Run the code, checking it against the reference.
+ CType expected = compute(desc, io.input);
+ int32_t check_seed = runnable.Call();
+ CHECK_EQ(seed, check_seed);
+ CHECK_EQ(expected, io.output);
+ }
+
+ {
+ // buffer mode.
+ Handle<Code> wrapper = Handle<Code>::null();
+ {
+ // Wrap the above code with a callable function that loads from {input}.
+ Zone zone;
+ Graph graph(&zone);
+ CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
+ RawMachineAssembler raw(isolate, &graph, cdesc);
+ Node* base = raw.PointerConstant(io.input);
+ Unique<HeapObject> unique =
+ Unique<HeapObject>::CreateUninitialized(inner);
+ Node* target = raw.HeapConstant(unique);
+ Node** args = zone.NewArray<Node*>(kMaxParamCount);
+ for (int i = 0; i < num_params; i++) {
+ args[i] = io.LoadInput(raw, base, i);
+ }
+
+ Node* call = raw.CallN(desc, target, args);
+ Node* store = io.StoreOutput(raw, call);
+ USE(store);
+ raw.Return(raw.Int32Constant(seed));
+ wrapper = CompileGraph("Compute-wrapper", cdesc, &graph, raw.Export());
+ }
+
+ CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
+
+ // Run the code, checking it against the reference.
+ for (int i = 0; i < 5; i++) {
+ CType expected = compute(desc, io.input);
+ int32_t check_seed = runnable.Call();
+ CHECK_EQ(seed, check_seed);
+ CHECK_EQ(expected, io.output);
+ io.Mutate();
+ }
+ }
+ }
+};
+
+} // namespace
+
+
+static void TestInt32Sub(CallDescriptor* desc) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ Zone zone;
+ GraphAndBuilders inner(&zone);
+ {
+ // Build the subtract function.
+ GraphAndBuilders& b = inner;
+ Node* start = b.graph()->NewNode(b.common()->Start(5));
+ b.graph()->SetStart(start);
+ Node* p0 = b.graph()->NewNode(b.common()->Parameter(0), start);
+ Node* p1 = b.graph()->NewNode(b.common()->Parameter(1), start);
+ Node* add = b.graph()->NewNode(b.machine()->Int32Sub(), p0, p1);
+ Node* ret = b.graph()->NewNode(b.common()->Return(), add, start, start);
+ b.graph()->SetEnd(ret);
+ }
+
+ Handle<Code> inner_code = CompileGraph("Int32Sub", desc, inner.graph());
+ Handle<Code> wrapper = WrapWithCFunction(inner_code, desc);
+ MachineSignature* msig =
+ const_cast<MachineSignature*>(desc->GetMachineSignature());
+ CodeRunner<int32_t> runnable(isolate, wrapper,
+ CSignature::FromMachine(&zone, msig));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) -
+ static_cast<uint32_t>(*j));
+ int32_t result = runnable.Call(*i, *j);
+ CHECK_EQ(expected, result);
+ }
+ }
+}
+
+
+static void CopyTwentyInt32(CallDescriptor* desc) {
+ if (DISABLE_NATIVE_STACK_PARAMS) return;
+
+ const int kNumParams = 20;
+ int32_t input[kNumParams];
+ int32_t output[kNumParams];
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ Handle<Code> inner = Handle<Code>::null();
+ {
+ // Writes all parameters into the output buffer.
+ Zone zone;
+ Graph graph(&zone);
+ RawMachineAssembler raw(isolate, &graph, desc);
+ Node* base = raw.PointerConstant(output);
+ for (int i = 0; i < kNumParams; i++) {
+ Node* offset = raw.Int32Constant(i * sizeof(int32_t));
+ raw.Store(kMachInt32, base, offset, raw.Parameter(i));
+ }
+ raw.Return(raw.Int32Constant(42));
+ inner = CompileGraph("CopyTwentyInt32", desc, &graph, raw.Export());
+ }
+
+ CSignature0<int32_t> csig;
+ Handle<Code> wrapper = Handle<Code>::null();
+ {
+ // Loads parameters from the input buffer and calls the above code.
+ Zone zone;
+ Graph graph(&zone);
+ CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
+ RawMachineAssembler raw(isolate, &graph, cdesc);
+ Node* base = raw.PointerConstant(input);
+ Unique<HeapObject> unique = Unique<HeapObject>::CreateUninitialized(inner);
+ Node* target = raw.HeapConstant(unique);
+ Node** args = zone.NewArray<Node*>(kNumParams);
+ for (int i = 0; i < kNumParams; i++) {
+ Node* offset = raw.Int32Constant(i * sizeof(int32_t));
+ args[i] = raw.Load(kMachInt32, base, offset);
+ }
+
+ Node* call = raw.CallN(desc, target, args);
+ raw.Return(call);
+ wrapper =
+ CompileGraph("CopyTwentyInt32-wrapper", cdesc, &graph, raw.Export());
+ }
+
+ CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
+
+ // Run the code, checking it correctly implements the memcpy.
+ for (int i = 0; i < 5; i++) {
+ uint32_t base = 1111111111u * i;
+ for (int j = 0; j < kNumParams; j++) {
+ input[j] = static_cast<int32_t>(base + 13 * j);
+ }
+
+ memset(output, 0, sizeof(output));
+ CHECK_EQ(42, runnable.Call());
+
+ for (int j = 0; j < kNumParams; j++) {
+ CHECK_EQ(input[j], output[j]);
+ }
+ }
+}
+
+
+static void Test_RunInt32SubWithRet(int retreg) {
+ Int32Signature sig(2);
+ Zone zone;
+ RegisterPairs pairs;
+ while (pairs.More()) {
+ int parray[2];
+ int rarray[] = {retreg};
+ pairs.Next(&parray[0], &parray[1], false);
+ Allocator params(parray, 2, nullptr, 0);
+ Allocator rets(rarray, 1, nullptr, 0);
+ RegisterConfig config(params, rets);
+ CallDescriptor* desc = config.Create(&zone, &sig);
+ TestInt32Sub(desc);
+ }
+}
+
+
+// Separate tests for parallelization.
+#define TEST_INT32_SUB_WITH_RET(x) \
+ TEST(Run_Int32Sub_all_allocatable_pairs_##x) { \
+ if (Register::kMaxNumAllocatableRegisters > x) Test_RunInt32SubWithRet(x); \
+ }
+
+
+TEST_INT32_SUB_WITH_RET(0)
+TEST_INT32_SUB_WITH_RET(1)
+TEST_INT32_SUB_WITH_RET(2)
+TEST_INT32_SUB_WITH_RET(3)
+TEST_INT32_SUB_WITH_RET(4)
+TEST_INT32_SUB_WITH_RET(5)
+TEST_INT32_SUB_WITH_RET(6)
+TEST_INT32_SUB_WITH_RET(7)
+TEST_INT32_SUB_WITH_RET(8)
+TEST_INT32_SUB_WITH_RET(9)
+TEST_INT32_SUB_WITH_RET(10)
+TEST_INT32_SUB_WITH_RET(11)
+TEST_INT32_SUB_WITH_RET(12)
+TEST_INT32_SUB_WITH_RET(13)
+TEST_INT32_SUB_WITH_RET(14)
+TEST_INT32_SUB_WITH_RET(15)
+TEST_INT32_SUB_WITH_RET(16)
+TEST_INT32_SUB_WITH_RET(17)
+TEST_INT32_SUB_WITH_RET(18)
+TEST_INT32_SUB_WITH_RET(19)
+
+
+TEST(Run_Int32Sub_all_allocatable_single) {
+ if (DISABLE_NATIVE_STACK_PARAMS) return;
+ Int32Signature sig(2);
+ RegisterPairs pairs;
+ while (pairs.More()) {
+ Zone zone;
+ int parray[1];
+ int rarray[1];
+ pairs.Next(&rarray[0], &parray[0], true);
+ Allocator params(parray, 1, nullptr, 0);
+ Allocator rets(rarray, 1, nullptr, 0);
+ RegisterConfig config(params, rets);
+ CallDescriptor* desc = config.Create(&zone, &sig);
+ TestInt32Sub(desc);
+ }
+}
+
+
+TEST(Run_CopyTwentyInt32_all_allocatable_pairs) {
+ if (DISABLE_NATIVE_STACK_PARAMS) return;
+ Int32Signature sig(20);
+ RegisterPairs pairs;
+ while (pairs.More()) {
+ Zone zone;
+ int parray[2];
+ int rarray[] = {0};
+ pairs.Next(&parray[0], &parray[1], false);
+ Allocator params(parray, 2, nullptr, 0);
+ Allocator rets(rarray, 1, nullptr, 0);
+ RegisterConfig config(params, rets);
+ CallDescriptor* desc = config.Create(&zone, &sig);
+ CopyTwentyInt32(desc);
+ }
+}
+
+
+template <typename CType>
+static void Run_Computation(
+ CallDescriptor* desc, void (*build)(CallDescriptor*, RawMachineAssembler&),
+ CType (*compute)(CallDescriptor*, CType* inputs), int seed = 1) {
+ Computer<CType>::Run(desc, build, compute, seed);
+}
+
+
+static uint32_t coeff[] = {1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
+ 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73,
+ 79, 83, 89, 97, 101, 103, 107, 109, 113};
+
+
+static void Build_Int32_WeightedSum(CallDescriptor* desc,
+ RawMachineAssembler& raw) {
+ Node* result = raw.Int32Constant(0);
+ for (int i = 0; i < ParamCount(desc); i++) {
+ Node* term = raw.Int32Mul(raw.Parameter(i), raw.Int32Constant(coeff[i]));
+ result = raw.Int32Add(result, term);
+ }
+ raw.Return(result);
+}
+
+
+static int32_t Compute_Int32_WeightedSum(CallDescriptor* desc, int32_t* input) {
+ uint32_t result = 0;
+ for (int i = 0; i < ParamCount(desc); i++) {
+ result += static_cast<uint32_t>(input[i]) * coeff[i];
+ }
+ return static_cast<int32_t>(result);
+}
+
+
+static void Test_Int32_WeightedSum_of_size(int count) {
+ if (DISABLE_NATIVE_STACK_PARAMS) return;
+ Int32Signature sig(count);
+ for (int p0 = 0; p0 < Register::kMaxNumAllocatableRegisters; p0++) {
+ Zone zone;
+
+ int parray[] = {p0};
+ int rarray[] = {0};
+ Allocator params(parray, 1, nullptr, 0);
+ Allocator rets(rarray, 1, nullptr, 0);
+ RegisterConfig config(params, rets);
+ CallDescriptor* desc = config.Create(&zone, &sig);
+ Run_Computation<int32_t>(desc, Build_Int32_WeightedSum,
+ Compute_Int32_WeightedSum, 257 + count);
+ }
+}
+
+
+// Separate tests for parallelization.
+#define TEST_INT32_WEIGHTEDSUM(x) \
+ TEST(Run_Int32_WeightedSum_##x) { Test_Int32_WeightedSum_of_size(x); }
+
+
+TEST_INT32_WEIGHTEDSUM(1)
+TEST_INT32_WEIGHTEDSUM(2)
+TEST_INT32_WEIGHTEDSUM(3)
+TEST_INT32_WEIGHTEDSUM(4)
+TEST_INT32_WEIGHTEDSUM(5)
+TEST_INT32_WEIGHTEDSUM(7)
+TEST_INT32_WEIGHTEDSUM(9)
+TEST_INT32_WEIGHTEDSUM(11)
+TEST_INT32_WEIGHTEDSUM(17)
+TEST_INT32_WEIGHTEDSUM(19)
+
+
+template <int which>
+static void Build_Select(CallDescriptor* desc, RawMachineAssembler& raw) {
+ raw.Return(raw.Parameter(which));
+}
+
+
+template <typename CType, int which>
+static CType Compute_Select(CallDescriptor* desc, CType* inputs) {
+ return inputs[which];
+}
+
+
+template <typename CType, int which>
+static void RunSelect(CallDescriptor* desc) {
+ int count = ParamCount(desc);
+ if (count <= which) return;
+ Run_Computation<CType>(desc, Build_Select<which>,
+ Compute_Select<CType, which>,
+ 1044 + which + 3 * sizeof(CType));
+}
+
+
+template <int which>
+void Test_Int32_Select() {
+ if (DISABLE_NATIVE_STACK_PARAMS) return;
+
+ int parray[] = {0};
+ int rarray[] = {0};
+ Allocator params(parray, 1, nullptr, 0);
+ Allocator rets(rarray, 1, nullptr, 0);
+ RegisterConfig config(params, rets);
+
+ Zone zone;
+
+ for (int i = which + 1; i <= 64; i++) {
+ Int32Signature sig(i);
+ CallDescriptor* desc = config.Create(&zone, &sig);
+ RunSelect<int32_t, which>(desc);
+ }
+}
+
+
+// Separate tests for parallelization.
+#define TEST_INT32_SELECT(x) \
+ TEST(Run_Int32_Select_##x) { Test_Int32_Select<x>(); }
+
+
+TEST_INT32_SELECT(0)
+TEST_INT32_SELECT(1)
+TEST_INT32_SELECT(2)
+TEST_INT32_SELECT(3)
+TEST_INT32_SELECT(4)
+TEST_INT32_SELECT(5)
+TEST_INT32_SELECT(6)
+TEST_INT32_SELECT(11)
+TEST_INT32_SELECT(15)
+TEST_INT32_SELECT(19)
+TEST_INT32_SELECT(45)
+TEST_INT32_SELECT(62)
+TEST_INT32_SELECT(63)
+
+
+TEST(Int64Select_registers) {
+ if (Register::kMaxNumAllocatableRegisters < 2) return;
+ if (kPointerSize < 8) return; // TODO(titzer): int64 on 32-bit platforms
+
+ int rarray[] = {0};
+ ArgsBuffer<int64_t>::Sig sig(2);
+
+ RegisterPairs pairs;
+ Zone zone;
+ while (pairs.More()) {
+ int parray[2];
+ pairs.Next(&parray[0], &parray[1], false);
+ Allocator params(parray, 2, nullptr, 0);
+ Allocator rets(rarray, 1, nullptr, 0);
+ RegisterConfig config(params, rets);
+
+ CallDescriptor* desc = config.Create(&zone, &sig);
+ RunSelect<int64_t, 0>(desc);
+ RunSelect<int64_t, 1>(desc);
+ }
+}
+
+
+TEST(Float32Select_registers) {
+ if (RegisterConfiguration::ArchDefault()->num_double_registers() < 2) return;
+
+ int rarray[] = {0};
+ ArgsBuffer<float32>::Sig sig(2);
+
+ Float32RegisterPairs pairs;
+ Zone zone;
+ while (pairs.More()) {
+ int parray[2];
+ pairs.Next(&parray[0], &parray[1], false);
+ Allocator params(nullptr, 0, parray, 2);
+ Allocator rets(nullptr, 0, rarray, 1);
+ RegisterConfig config(params, rets);
+
+ CallDescriptor* desc = config.Create(&zone, &sig);
+ RunSelect<float32, 0>(desc);
+ RunSelect<float32, 1>(desc);
+ }
+}
+
+
+TEST(Float64Select_registers) {
+ if (RegisterConfiguration::ArchDefault()->num_double_registers() < 2) return;
+
+ int rarray[] = {0};
+ ArgsBuffer<float64>::Sig sig(2);
+
+ Float64RegisterPairs pairs;
+ Zone zone;
+ while (pairs.More()) {
+ int parray[2];
+ pairs.Next(&parray[0], &parray[1], false);
+ Allocator params(nullptr, 0, parray, 2);
+ Allocator rets(nullptr, 0, rarray, 1);
+ RegisterConfig config(params, rets);
+
+ CallDescriptor* desc = config.Create(&zone, &sig);
+ RunSelect<float64, 0>(desc);
+ RunSelect<float64, 1>(desc);
+ }
+}
+
+
+TEST(Float32Select_stack_params_return_reg) {
+ if (DISABLE_NATIVE_STACK_PARAMS) return;
+ int rarray[] = {0};
+ Allocator params(nullptr, 0, nullptr, 0);
+ Allocator rets(nullptr, 0, rarray, 1);
+ RegisterConfig config(params, rets);
+
+ Zone zone;
+ for (int count = 1; count < 6; count++) {
+ ArgsBuffer<float32>::Sig sig(count);
+ CallDescriptor* desc = config.Create(&zone, &sig);
+ RunSelect<float32, 0>(desc);
+ RunSelect<float32, 1>(desc);
+ RunSelect<float32, 2>(desc);
+ RunSelect<float32, 3>(desc);
+ RunSelect<float32, 4>(desc);
+ RunSelect<float32, 5>(desc);
+ }
+}
+
+
+TEST(Float64Select_stack_params_return_reg) {
+ if (DISABLE_NATIVE_STACK_PARAMS) return;
+ int rarray[] = {0};
+ Allocator params(nullptr, 0, nullptr, 0);
+ Allocator rets(nullptr, 0, rarray, 1);
+ RegisterConfig config(params, rets);
+
+ Zone zone;
+ for (int count = 1; count < 6; count++) {
+ ArgsBuffer<float64>::Sig sig(count);
+ CallDescriptor* desc = config.Create(&zone, &sig);
+ RunSelect<float64, 0>(desc);
+ RunSelect<float64, 1>(desc);
+ RunSelect<float64, 2>(desc);
+ RunSelect<float64, 3>(desc);
+ RunSelect<float64, 4>(desc);
+ RunSelect<float64, 5>(desc);
+ }
+}
+
+
+template <typename CType, int which>
+static void Build_Select_With_Call(CallDescriptor* desc,
+ RawMachineAssembler& raw) {
+ Handle<Code> inner = Handle<Code>::null();
+ int num_params = ParamCount(desc);
+ CHECK_LE(num_params, kMaxParamCount);
+ {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ // Build the actual select.
+ Zone zone;
+ Graph graph(&zone);
+ RawMachineAssembler raw(isolate, &graph, desc);
+ raw.Return(raw.Parameter(which));
+ inner = CompileGraph("Select-indirection", desc, &graph, raw.Export());
+ CHECK(!inner.is_null());
+ CHECK(inner->IsCode());
+ }
+
+ {
+ // Build a call to the function that does the select.
+ Unique<HeapObject> unique = Unique<HeapObject>::CreateUninitialized(inner);
+ Node* target = raw.HeapConstant(unique);
+ Node** args = raw.zone()->NewArray<Node*>(num_params);
+ for (int i = 0; i < num_params; i++) {
+ args[i] = raw.Parameter(i);
+ }
+
+ Node* call = raw.CallN(desc, target, args);
+ raw.Return(call);
+ }
+}
+
+
+TEST(Float64StackParamsToStackParams) {
+ if (DISABLE_NATIVE_STACK_PARAMS) return;
+
+ int rarray[] = {0};
+ Allocator params(nullptr, 0, nullptr, 0);
+ Allocator rets(nullptr, 0, rarray, 1);
+
+ Zone zone;
+ ArgsBuffer<float64>::Sig sig(2);
+ RegisterConfig config(params, rets);
+ CallDescriptor* desc = config.Create(&zone, &sig);
+
+ Run_Computation<float64>(desc, Build_Select_With_Call<float64, 0>,
+ Compute_Select<float64, 0>, 1098);
+
+ Run_Computation<float64>(desc, Build_Select_With_Call<float64, 1>,
+ Compute_Select<float64, 1>, 1099);
+}
diff --git a/deps/v8/test/cctest/compiler/test-run-properties.cc b/deps/v8/test/cctest/compiler/test-run-properties.cc
index d4442f7a85..b7677f7fd2 100644
--- a/deps/v8/test/cctest/compiler/test-run-properties.cc
+++ b/deps/v8/test/cctest/compiler/test-run-properties.cc
@@ -21,16 +21,15 @@ static void TypedArrayLoadHelper(const char* array_type) {
values_builder.AddFormatted("a[%d] = 0x%08x;", i, kValues[i]);
}
- // Note that below source creates two different typed arrays with distinct
- // elements kind to get coverage for both access patterns:
- // - IsFixedTypedArrayElementsKind(x)
- // - IsExternalArrayElementsKind(y)
+ // Note that below source creates two different typed arrays with the same
+ // elements kind to get coverage for both (on heap / with external backing
+ // store) access patterns.
const char* source =
"(function(a) {"
" var x = (a = new %sArray(%d)); %s;"
" var y = (a = new %sArray(%d)); %s; %%TypedArrayGetBuffer(y);"
" if (!%%HasFixed%sElements(x)) %%AbortJS('x');"
- " if (!%%HasExternal%sElements(y)) %%AbortJS('y');"
+ " if (!%%HasFixed%sElements(y)) %%AbortJS('y');"
" function f(a,b) {"
" a = a | 0; b = b | 0;"
" return x[a] + y[b];"
@@ -84,16 +83,15 @@ static void TypedArrayStoreHelper(const char* array_type) {
values_builder.AddFormatted("a[%d] = 0x%08x;", i, kValues[i]);
}
- // Note that below source creates two different typed arrays with distinct
- // elements kind to get coverage for both access patterns:
- // - IsFixedTypedArrayElementsKind(x)
- // - IsExternalArrayElementsKind(y)
+ // Note that below source creates two different typed arrays with the same
+ // elements kind to get coverage for both (on heap/with external backing
+ // store) access patterns.
const char* source =
"(function(a) {"
" var x = (a = new %sArray(%d)); %s;"
" var y = (a = new %sArray(%d)); %s; %%TypedArrayGetBuffer(y);"
" if (!%%HasFixed%sElements(x)) %%AbortJS('x');"
- " if (!%%HasExternal%sElements(y)) %%AbortJS('y');"
+ " if (!%%HasFixed%sElements(y)) %%AbortJS('y');"
" function f(a,b) {"
" a = a | 0; b = b | 0;"
" var t = x[a];"
diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc
index 9c7998d7af..607efa135b 100644
--- a/deps/v8/test/cctest/compiler/test-run-stubs.cc
+++ b/deps/v8/test/cctest/compiler/test-run-stubs.cc
@@ -14,23 +14,21 @@
#include "src/parser.h"
#include "test/cctest/compiler/function-tester.h"
-#if V8_TURBOFAN_TARGET
-
using namespace v8::internal;
using namespace v8::internal::compiler;
-TEST(RunMathFloorStub) {
+TEST(RunOptimizedMathFloorStub) {
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
// Create code and an accompanying descriptor.
- MathFloorStub stub(isolate);
+ MathFloorStub stub(isolate, TurboFanIC::CALL_FROM_OPTIMIZED_CODE);
Handle<Code> code = stub.GenerateCode();
Zone* zone = scope.main_zone();
-
CompilationInfo info(&stub, isolate, zone);
CallDescriptor* descriptor = Linkage::ComputeIncoming(zone, &info);
+ Handle<FixedArray> tv = isolate->factory()->NewFixedArray(10);
// Create a function to call the code using the descriptor.
Graph graph(zone);
@@ -45,10 +43,13 @@ TEST(RunMathFloorStub) {
Node* numberParam = graph.NewNode(common.Parameter(1), start);
Unique<HeapObject> u = Unique<HeapObject>::CreateImmovable(code);
Node* theCode = graph.NewNode(common.HeapConstant(u));
+ Unique<HeapObject> tvu = Unique<HeapObject>::CreateImmovable(tv);
+ Node* vector = graph.NewNode(common.HeapConstant(tvu));
Node* dummyContext = graph.NewNode(common.NumberConstant(0.0));
- Node* call = graph.NewNode(common.Call(descriptor), theCode,
- js.UndefinedConstant(), js.UndefinedConstant(),
- numberParam, dummyContext, start, start);
+ Node* call =
+ graph.NewNode(common.Call(descriptor), theCode, js.UndefinedConstant(),
+ js.OneConstant(), vector, js.UndefinedConstant(),
+ numberParam, dummyContext, start, start);
Node* ret = graph.NewNode(common.Return(), call, call, start);
Node* end = graph.NewNode(common.End(1), ret);
graph.SetStart(start);
@@ -143,5 +144,3 @@ TEST(RunStringAddTFStub) {
Handle<Object> result = ft.Call(leftArg, rightArg).ToHandleChecked();
CHECK(String::Equals(ft.Val("linksrechts"), Handle<String>::cast(result)));
}
-
-#endif // V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
index 022e01690b..2a642c1589 100644
--- a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
@@ -110,14 +110,12 @@ TEST(RunNumberToInt32_float64) {
t.LowerAllNodes();
t.GenerateCode();
- if (Pipeline::SupportedTarget()) {
FOR_FLOAT64_INPUTS(i) {
input = *i;
int32_t expected = DoubleToInt32(*i);
t.Call();
CHECK_EQ(expected, result);
}
- }
}
@@ -139,7 +137,6 @@ TEST(RunNumberToUint32_float64) {
t.LowerAllNodes();
t.GenerateCode();
- if (Pipeline::SupportedTarget()) {
FOR_FLOAT64_INPUTS(i) {
input = *i;
uint32_t expected = DoubleToUint32(*i);
@@ -147,7 +144,6 @@ TEST(RunNumberToUint32_float64) {
CHECK_EQ(static_cast<int32_t>(expected), static_cast<int32_t>(result));
}
}
-}
// Create a simple JSObject with a unique map.
@@ -168,12 +164,10 @@ TEST(RunLoadMap) {
t.LowerAllNodes();
t.GenerateCode();
- if (Pipeline::SupportedTarget()) {
- Handle<JSObject> src = TestObject();
- Handle<Map> src_map(src->map());
- Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
- CHECK_EQ(*src_map, result);
- }
+ Handle<JSObject> src = TestObject();
+ Handle<Map> src_map(src->map());
+ Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
+ CHECK_EQ(*src_map, result);
}
@@ -186,7 +180,6 @@ TEST(RunStoreMap) {
t.LowerAllNodes();
t.GenerateCode();
- if (Pipeline::SupportedTarget()) {
Handle<JSObject> src = TestObject();
Handle<Map> src_map(src->map());
Handle<JSObject> dst = TestObject();
@@ -194,7 +187,6 @@ TEST(RunStoreMap) {
t.Call(*src_map, *dst); // TODO(titzer): raw pointers in call
CHECK(*src_map == dst->map());
}
-}
TEST(RunLoadProperties) {
@@ -206,12 +198,10 @@ TEST(RunLoadProperties) {
t.LowerAllNodes();
t.GenerateCode();
- if (Pipeline::SupportedTarget()) {
Handle<JSObject> src = TestObject();
Handle<FixedArray> src_props(src->properties());
Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
CHECK_EQ(*src_props, result);
- }
}
@@ -225,7 +215,6 @@ TEST(RunLoadStoreMap) {
t.LowerAllNodes();
t.GenerateCode();
- if (Pipeline::SupportedTarget()) {
Handle<JSObject> src = TestObject();
Handle<Map> src_map(src->map());
Handle<JSObject> dst = TestObject();
@@ -234,7 +223,6 @@ TEST(RunLoadStoreMap) {
CHECK(result->IsMap());
CHECK_EQ(*src_map, result);
CHECK(*src_map == dst->map());
- }
}
@@ -248,7 +236,6 @@ TEST(RunLoadStoreFixedArrayIndex) {
t.LowerAllNodes();
t.GenerateCode();
- if (Pipeline::SupportedTarget()) {
Handle<FixedArray> array = t.factory()->NewFixedArray(2);
Handle<JSObject> src = TestObject();
Handle<JSObject> dst = TestObject();
@@ -258,7 +245,6 @@ TEST(RunLoadStoreFixedArrayIndex) {
CHECK_EQ(*src, result);
CHECK_EQ(*src, array->get(0));
CHECK_EQ(*src, array->get(1));
- }
}
@@ -279,7 +265,6 @@ TEST(RunLoadStoreArrayBuffer) {
t.LowerAllNodes();
t.GenerateCode();
- if (Pipeline::SupportedTarget()) {
Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
Runtime::SetupArrayBufferAllocatingData(t.isolate(), array, array_length);
uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
@@ -296,7 +281,6 @@ TEST(RunLoadStoreArrayBuffer) {
CHECK_EQ(data[i], expected);
}
}
-}
TEST(RunLoadFieldFromUntaggedBase) {
@@ -312,8 +296,6 @@ TEST(RunLoadFieldFromUntaggedBase) {
t.Return(load);
t.LowerAllNodes();
- if (!Pipeline::SupportedTarget()) continue;
-
for (int j = -5; j <= 5; j++) {
Smi* expected = Smi::FromInt(j);
smis[i] = expected;
@@ -337,8 +319,6 @@ TEST(RunStoreFieldToUntaggedBase) {
t.Return(p0);
t.LowerAllNodes();
- if (!Pipeline::SupportedTarget()) continue;
-
for (int j = -5; j <= 5; j++) {
Smi* expected = Smi::FromInt(j);
smis[i] = Smi::FromInt(-100);
@@ -365,8 +345,6 @@ TEST(RunLoadElementFromUntaggedBase) {
t.Return(load);
t.LowerAllNodes();
- if (!Pipeline::SupportedTarget()) continue;
-
for (int k = -5; k <= 5; k++) {
Smi* expected = Smi::FromInt(k);
smis[i + j] = expected;
@@ -394,8 +372,6 @@ TEST(RunStoreElementFromUntaggedBase) {
t.Return(p0);
t.LowerAllNodes();
- if (!Pipeline::SupportedTarget()) continue;
-
for (int k = -5; k <= 5; k++) {
Smi* expected = Smi::FromInt(k);
smis[i + j] = Smi::FromInt(-100);
@@ -462,10 +438,8 @@ class AccessTester : public HandleAndZoneScope {
t.LowerAllNodes();
t.GenerateCode();
- if (Pipeline::SupportedTarget()) {
Object* result = t.Call();
CHECK_EQ(t.isolate()->heap()->true_value(), result);
- }
}
// Create and run code that copies the field in either {untagged_array}
@@ -484,10 +458,8 @@ class AccessTester : public HandleAndZoneScope {
t.LowerAllNodes();
t.GenerateCode();
- if (Pipeline::SupportedTarget()) {
Object* result = t.Call();
CHECK_EQ(t.isolate()->heap()->true_value(), result);
- }
}
// Create and run code that copies the elements from {this} to {that}.
@@ -525,10 +497,8 @@ class AccessTester : public HandleAndZoneScope {
t.LowerAllNodes();
t.GenerateCode();
- if (Pipeline::SupportedTarget()) {
Object* result = t.Call();
CHECK_EQ(t.isolate()->heap()->true_value(), result);
- }
#endif
}
@@ -596,13 +566,11 @@ static void RunAccessTest(MachineType rep, E* original_elements, size_t num) {
} else {
a.RunCopyElement(i, i + 1); // Test element read/write.
}
- if (Pipeline::SupportedTarget()) { // verify.
for (int j = 0; j < num_elements; j++) {
E expect =
j == (i + 1) ? original_elements[i] : original_elements[j];
CHECK_EQ(expect, a.GetElement(j));
}
- }
}
}
}
@@ -612,10 +580,8 @@ static void RunAccessTest(MachineType rep, E* original_elements, size_t num) {
AccessTester<E> a(tf == 1, rep, original_elements, num);
AccessTester<E> b(tt == 1, rep, original_elements, num);
a.RunCopyElements(&b);
- if (Pipeline::SupportedTarget()) { // verify.
for (int i = 0; i < num_elements; i++) {
CHECK_EQ(a.GetElement(i), b.GetElement(i));
- }
}
}
}
@@ -668,7 +634,7 @@ TEST(RunAccessTests_Smi) {
RunAccessTest<Smi*>(kMachAnyTagged, data, arraysize(data));
}
-#if V8_TURBOFAN_TARGET
+
TEST(RunAllocate) {
PretenureFlag flag[] = {NOT_TENURED, TENURED};
@@ -684,15 +650,13 @@ TEST(RunAllocate) {
t.LowerAllNodes();
t.GenerateCode();
- if (Pipeline::SupportedTarget()) {
HeapObject* result = t.CallWithPotentialGC<HeapObject>();
CHECK(t.heap()->new_space()->Contains(result) || flag[i] == TENURED);
CHECK(t.heap()->old_space()->Contains(result) || flag[i] == NOT_TENURED);
CHECK(result->IsHeapNumber());
- }
}
}
-#endif
+
// Fills in most of the nodes of the graph in order to make tests shorter.
class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
@@ -1264,7 +1228,6 @@ TEST(LowerReferenceEqual_to_wordeq) {
TEST(LowerStringOps_to_call_and_compare) {
- if (Pipeline::SupportedTarget()) {
// These tests need linkage for the calls.
TestingGraph t(Type::String(), Type::String());
IrOpcode::Value compare_eq =
@@ -1277,7 +1240,6 @@ TEST(LowerStringOps_to_call_and_compare) {
t.CheckLoweringBinop(compare_lt, t.simplified()->StringLessThan());
t.CheckLoweringBinop(compare_le, t.simplified()->StringLessThanOrEqual());
}
-}
void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
@@ -1708,7 +1670,6 @@ TEST(RunNumberDivide_minus_1_TruncatingToInt32) {
Node* trunc = t.NumberToInt32(div);
t.Return(trunc);
- if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
@@ -1716,7 +1677,6 @@ TEST(RunNumberDivide_minus_1_TruncatingToInt32) {
int32_t x = 0 - *i;
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
}
- }
}
@@ -1747,7 +1707,6 @@ TEST(RunNumberMultiply_TruncatingToInt32) {
Node* trunc = t.NumberToInt32(mul);
t.Return(trunc);
- if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
@@ -1756,7 +1715,6 @@ TEST(RunNumberMultiply_TruncatingToInt32) {
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
}
}
- }
}
@@ -1771,14 +1729,12 @@ TEST(RunNumberMultiply_TruncatingToUint32) {
Node* trunc = t.NumberToUint32(mul);
t.Return(trunc);
- if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
FOR_UINT32_INPUTS(i) {
uint32_t x = DoubleToUint32(static_cast<double>(*i) * k);
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
}
}
}
@@ -1791,7 +1747,6 @@ TEST(RunNumberDivide_2_TruncatingToUint32) {
Node* trunc = t.NumberToUint32(div);
t.Return(trunc);
- if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
@@ -1799,7 +1754,6 @@ TEST(RunNumberDivide_2_TruncatingToUint32) {
uint32_t x = DoubleToUint32(static_cast<double>(*i / 2.0));
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
}
- }
}
@@ -1853,7 +1807,6 @@ TEST(RunNumberDivide_TruncatingToInt32) {
Node* trunc = t.NumberToInt32(div);
t.Return(trunc);
- if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
@@ -1861,7 +1814,6 @@ TEST(RunNumberDivide_TruncatingToInt32) {
if (*i == INT_MAX) continue; // exclude max int.
int32_t x = DoubleToInt32(static_cast<double>(*i) / k);
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
}
}
}
@@ -1894,14 +1846,12 @@ TEST(RunNumberDivide_TruncatingToUint32) {
Node* trunc = t.NumberToUint32(div);
t.Return(trunc);
- if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
FOR_UINT32_INPUTS(i) {
uint32_t x = *i / k;
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
}
}
}
@@ -1972,7 +1922,6 @@ TEST(RunNumberModulus_TruncatingToInt32) {
Node* trunc = t.NumberToInt32(mod);
t.Return(trunc);
- if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
@@ -1980,7 +1929,6 @@ TEST(RunNumberModulus_TruncatingToInt32) {
if (*i == INT_MAX) continue; // exclude max int.
int32_t x = DoubleToInt32(std::fmod(static_cast<double>(*i), k));
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
}
}
}
@@ -2014,14 +1962,12 @@ TEST(RunNumberModulus_TruncatingToUint32) {
Node* trunc = t.NumberToUint32(mod);
t.Return(trunc);
- if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
FOR_UINT32_INPUTS(i) {
uint32_t x = *i % k;
t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
}
}
}
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
new file mode 100644
index 0000000000..deb6c971a1
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -0,0 +1,129 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler.h"
+#include "src/interpreter/bytecode-generator.h"
+#include "src/interpreter/interpreter.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeGeneratorHelper {
+ public:
+ const char* kFunctionName = "my_function";
+
+ BytecodeGeneratorHelper() {
+ i::FLAG_ignition = true;
+ i::FLAG_ignition_filter = kFunctionName;
+ CcTest::i_isolate()->interpreter()->Initialize();
+ }
+
+
+ Handle<BytecodeArray> MakeBytecode(const char* script,
+ const char* function_name) {
+ CompileRun(script);
+ Local<Function> function =
+ Local<Function>::Cast(CcTest::global()->Get(v8_str(function_name)));
+ i::Handle<i::JSFunction> js_function = v8::Utils::OpenHandle(*function);
+ return handle(js_function->shared()->bytecode_array(), CcTest::i_isolate());
+ }
+
+
+ Handle<BytecodeArray> MakeBytecodeForFunctionBody(const char* body) {
+ ScopedVector<char> program(1024);
+ SNPrintF(program, "function %s() { %s }\n%s();", kFunctionName, body,
+ kFunctionName);
+ return MakeBytecode(program.start(), kFunctionName);
+ }
+};
+
+
+// Structure for containing expected bytecode snippets.
+struct ExpectedSnippet {
+ const char* body;
+ int frame_size;
+ int bytecode_length;
+ const uint8_t bytecode[16];
+};
+
+
+// Helper macros for handcrafting bytecode sequences.
+#define B(x) static_cast<uint8_t>(Bytecode::k##x)
+#define U8(x) static_cast<uint8_t>(x & 0xff)
+#define R(x) static_cast<uint8_t>(-x & 0xff)
+
+
+TEST(PrimitiveReturnStatements) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ ExpectedSnippet snippets[] = {
+ {"return;", 0, 2, {B(LdaUndefined), B(Return)}},
+ {"return null;", 0, 2, {B(LdaNull), B(Return)}},
+ {"return true;", 0, 2, {B(LdaTrue), B(Return)}},
+ {"return false;", 0, 2, {B(LdaFalse), B(Return)}},
+ {"return 0;", 0, 2, {B(LdaZero), B(Return)}},
+ {"return +1;", 0, 3, {B(LdaSmi8), U8(1), B(Return)}},
+ {"return -1;", 0, 3, {B(LdaSmi8), U8(-1), B(Return)}},
+ {"return +127;", 0, 3, {B(LdaSmi8), U8(127), B(Return)}},
+ {"return -128;", 0, 3, {B(LdaSmi8), U8(-128), B(Return)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ Handle<BytecodeArray> ba =
+ helper.MakeBytecodeForFunctionBody(snippets[i].body);
+ CHECK_EQ(ba->frame_size(), snippets[i].frame_size);
+ CHECK_EQ(ba->length(), snippets[i].bytecode_length);
+ CHECK(!memcmp(ba->GetFirstBytecodeAddress(), snippets[i].bytecode,
+ ba->length()));
+ }
+}
+
+
+TEST(PrimitiveExpressions) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ ExpectedSnippet snippets[] = {
+ {"var x = 0; return x;",
+ kPointerSize,
+ 6,
+ {
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(Ldar), R(0), //
+ B(Return) //
+ }},
+ {"var x = 0; return x + 3;",
+ 2 * kPointerSize,
+ 12,
+ {
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(Ldar), R(0), // Easy to spot r1 not really needed here.
+ B(Star), R(1), // Dead store.
+ B(LdaSmi8), U8(3), //
+ B(Add), R(1), //
+ B(Return) //
+ }}};
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ Handle<BytecodeArray> ba =
+ helper.MakeBytecodeForFunctionBody(snippets[i].body);
+ CHECK_EQ(ba->frame_size(), snippets[i].frame_size);
+ CHECK_EQ(ba->length(), snippets[i].bytecode_length);
+ CHECK(!memcmp(ba->GetFirstBytecodeAddress(), snippets[i].bytecode,
+ ba->length()));
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+}  // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
new file mode 100644
index 0000000000..2302fdc9ac
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -0,0 +1,208 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/execution.h"
+#include "src/handles.h"
+#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/interpreter.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class InterpreterCallable {
+ public:
+ InterpreterCallable(Isolate* isolate, Handle<JSFunction> function)
+ : isolate_(isolate), function_(function) {}
+ virtual ~InterpreterCallable() {}
+
+ MaybeHandle<Object> operator()() {
+ return Execution::Call(isolate_, function_,
+ isolate_->factory()->undefined_value(), 0, nullptr,
+ false);
+ }
+
+ private:
+ Isolate* isolate_;
+ Handle<JSFunction> function_;
+};
+
+class InterpreterTester {
+ public:
+ InterpreterTester(Isolate* isolate, Handle<BytecodeArray> bytecode)
+ : isolate_(isolate), function_(GetBytecodeFunction(isolate, bytecode)) {
+ i::FLAG_ignition = true;
+ // Ensure handler table is generated.
+ isolate->interpreter()->Initialize();
+ }
+ virtual ~InterpreterTester() {}
+
+ InterpreterCallable GetCallable() {
+ return InterpreterCallable(isolate_, function_);
+ }
+
+ private:
+ Isolate* isolate_;
+ Handle<JSFunction> function_;
+
+ static Handle<JSFunction> GetBytecodeFunction(
+ Isolate* isolate, Handle<BytecodeArray> bytecode_array) {
+ Handle<JSFunction> function = v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(CompileRun("(function(){})")));
+ function->ReplaceCode(*isolate->builtins()->InterpreterEntryTrampoline());
+ function->shared()->set_function_data(*bytecode_array);
+ return function;
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(InterpreterTester);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+using v8::internal::BytecodeArray;
+using v8::internal::Handle;
+using v8::internal::Object;
+using v8::internal::Smi;
+using namespace v8::internal::interpreter;
+
+TEST(TestInterpreterReturn) {
+ InitializedHandleScope handles;
+ Handle<Object> undefined_value =
+ handles.main_isolate()->factory()->undefined_value();
+
+ BytecodeArrayBuilder builder(handles.main_isolate());
+ builder.set_locals_count(0);
+ builder.Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterCallable callable(tester.GetCallable());
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK(return_val.is_identical_to(undefined_value));
+}
+
+
+TEST(TestInterpreterLoadUndefined) {
+ InitializedHandleScope handles;
+ Handle<Object> undefined_value =
+ handles.main_isolate()->factory()->undefined_value();
+
+ BytecodeArrayBuilder builder(handles.main_isolate());
+ builder.set_locals_count(0);
+ builder.LoadUndefined().Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterCallable callable(tester.GetCallable());
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK(return_val.is_identical_to(undefined_value));
+}
+
+
+TEST(TestInterpreterLoadNull) {
+ InitializedHandleScope handles;
+ Handle<Object> null_value = handles.main_isolate()->factory()->null_value();
+
+ BytecodeArrayBuilder builder(handles.main_isolate());
+ builder.set_locals_count(0);
+ builder.LoadNull().Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterCallable callable(tester.GetCallable());
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK(return_val.is_identical_to(null_value));
+}
+
+
+TEST(TestInterpreterLoadTheHole) {
+ InitializedHandleScope handles;
+ Handle<Object> the_hole_value =
+ handles.main_isolate()->factory()->the_hole_value();
+
+ BytecodeArrayBuilder builder(handles.main_isolate());
+ builder.set_locals_count(0);
+ builder.LoadTheHole().Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterCallable callable(tester.GetCallable());
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK(return_val.is_identical_to(the_hole_value));
+}
+
+
+TEST(TestInterpreterLoadTrue) {
+ InitializedHandleScope handles;
+ Handle<Object> true_value = handles.main_isolate()->factory()->true_value();
+
+ BytecodeArrayBuilder builder(handles.main_isolate());
+ builder.set_locals_count(0);
+ builder.LoadTrue().Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterCallable callable(tester.GetCallable());
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK(return_val.is_identical_to(true_value));
+}
+
+
+TEST(TestInterpreterLoadFalse) {
+ InitializedHandleScope handles;
+ Handle<Object> false_value = handles.main_isolate()->factory()->false_value();
+
+ BytecodeArrayBuilder builder(handles.main_isolate());
+ builder.set_locals_count(0);
+ builder.LoadFalse().Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterCallable callable(tester.GetCallable());
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK(return_val.is_identical_to(false_value));
+}
+
+
+TEST(TestInterpreterLoadLiteral) {
+ InitializedHandleScope handles;
+ for (int i = -128; i < 128; i++) {
+ BytecodeArrayBuilder builder(handles.main_isolate());
+ builder.set_locals_count(0);
+ builder.LoadLiteral(Smi::FromInt(i)).Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterCallable callable(tester.GetCallable());
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(i));
+ }
+}
+
+
+TEST(TestInterpreterLoadStoreRegisters) {
+ InitializedHandleScope handles;
+ Handle<Object> true_value = handles.main_isolate()->factory()->true_value();
+ for (int i = 0; i <= Register::kMaxRegisterIndex; i++) {
+ BytecodeArrayBuilder builder(handles.main_isolate());
+ builder.set_locals_count(i + 1);
+ Register reg(i);
+ builder.LoadTrue()
+ .StoreAccumulatorInRegister(reg)
+ .LoadFalse()
+ .LoadAccumulatorWithRegister(reg)
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterCallable callable(tester.GetCallable());
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK(return_val.is_identical_to(true_value));
+ }
+}
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index 2e9bc74a92..c0a8a287ae 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -14,7 +14,6 @@
#include "src/execution.h"
#include "src/objects.h"
#include "src/parser.h"
-#include "src/smart-pointers.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "src/vm-state.h"
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 88d4aef25e..dad5d6caf5 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -40,12 +40,13 @@
#include "src/api.h"
#include "src/arguments.h"
#include "src/base/platform/platform.h"
+#include "src/base/smart-pointers.h"
#include "src/compilation-cache.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/execution.h"
+#include "src/futex-emulation.h"
#include "src/objects.h"
#include "src/parser.h"
-#include "src/smart-pointers.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "src/vm-state.h"
@@ -522,7 +523,7 @@ TEST(MakingExternalStringConditions) {
String::NewFromTwoByte(env->GetIsolate(), two_byte_string);
i::DeleteArray(two_byte_string);
- // We should refuse to externalize newly created small string.
+ // We should refuse to externalize small strings.
CHECK(!small_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
@@ -534,14 +535,6 @@ TEST(MakingExternalStringConditions) {
small_string = String::NewFromTwoByte(env->GetIsolate(), two_byte_string);
i::DeleteArray(two_byte_string);
- // We should refuse externalizing newly created small string.
- CHECK(!small_string->CanMakeExternal());
- for (int i = 0; i < 100; i++) {
- String::Value value(small_string);
- }
- // Frequently used strings should be accepted.
- CHECK(small_string->CanMakeExternal());
-
const int buf_size = 10 * 1024;
char* buf = i::NewArray<char>(buf_size);
memset(buf, 'a', buf_size);
@@ -566,7 +559,7 @@ TEST(MakingExternalOneByteStringConditions) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
Local<String> small_string = String::NewFromUtf8(env->GetIsolate(), "s1");
- // We should refuse to externalize newly created small string.
+ // We should refuse to externalize small strings.
CHECK(!small_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
@@ -574,15 +567,6 @@ TEST(MakingExternalOneByteStringConditions) {
// Old space strings should be accepted.
CHECK(small_string->CanMakeExternal());
- small_string = String::NewFromUtf8(env->GetIsolate(), "small string 2");
- // We should refuse externalizing newly created small string.
- CHECK(!small_string->CanMakeExternal());
- for (int i = 0; i < 100; i++) {
- String::Value value(small_string);
- }
- // Frequently used strings should be accepted.
- CHECK(small_string->CanMakeExternal());
-
const int buf_size = 10 * 1024;
char* buf = i::NewArray<char>(buf_size);
memset(buf, 'a', buf_size);
@@ -1965,7 +1949,7 @@ THREADED_TEST(UndefinedIsNotEnumerable) {
v8::Handle<Script> call_recursively_script;
-static const int kTargetRecursionDepth = 200; // near maximum
+static const int kTargetRecursionDepth = 150; // near maximum
static void CallScriptRecursivelyCall(
@@ -3275,6 +3259,7 @@ TEST(TwoPassPhantomCallbacks) {
}
CHECK_EQ(static_cast<int>(kLength), instance_counter);
CcTest::heap()->CollectAllGarbage();
+ EmptyMessageQueues(isolate);
CHECK_EQ(0, instance_counter);
}
@@ -3293,6 +3278,7 @@ TEST(TwoPassPhantomCallbacksNestedGc) {
array[15]->MarkTriggerGc();
CHECK_EQ(static_cast<int>(kLength), instance_counter);
CcTest::heap()->CollectAllGarbage();
+ EmptyMessageQueues(isolate);
CHECK_EQ(0, instance_counter);
}
@@ -3342,6 +3328,8 @@ class PhantomStdMapTraits : public v8::StdMapTraits<K, V> {
CHECK_EQ(IntKeyToVoidPointer(key),
v8::Object::GetAlignedPointerFromInternalField(value, 0));
}
+ static void OnWeakCallback(
+ const v8::WeakCallbackInfo<WeakCallbackDataType>&) {}
static void DisposeWeak(
const v8::WeakCallbackInfo<WeakCallbackDataType>& info) {
K key = KeyFromWeakCallbackInfo(info);
@@ -6782,6 +6770,7 @@ static void ForceMarkSweep1(
THREADED_TEST(GCFromWeakCallbacks) {
v8::Isolate* isolate = CcTest::isolate();
+ v8::Locker locker(CcTest::isolate());
v8::HandleScope scope(isolate);
v8::Handle<Context> context = Context::New(isolate);
Context::Scope context_scope(context);
@@ -6806,6 +6795,7 @@ THREADED_TEST(GCFromWeakCallbacks) {
v8::WeakCallbackType::kParameter);
object.handle.MarkIndependent();
invoke_gc[outer_gc]();
+ EmptyMessageQueues(isolate);
CHECK(object.flag);
}
}
@@ -8135,7 +8125,45 @@ THREADED_TEST(CrossDomainIsPropertyEnumerable) {
}
-THREADED_TEST(CrossDomainForIn) {
+THREADED_TEST(CrossDomainFor) {
+ LocalContext env1;
+ v8::HandleScope handle_scope(env1->GetIsolate());
+ v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
+
+ Local<Value> foo = v8_str("foo");
+ Local<Value> bar = v8_str("bar");
+
+ // Set to the same domain.
+ env1->SetSecurityToken(foo);
+ env2->SetSecurityToken(foo);
+
+ env1->Global()->Set(v8_str("prop"), v8_num(3));
+ env2->Global()->Set(v8_str("env1"), env1->Global());
+
+ // Change env2 to a different domain and set env1's global object
+ // as the __proto__ of an object in env2 and enumerate properties
+ // in for-in. It shouldn't enumerate properties on env1's global
+ // object.
+ env2->SetSecurityToken(bar);
+ {
+ Context::Scope scope_env2(env2);
+ Local<Value> result = CompileRun(
+ "(function() {"
+ " try {"
+ " for (var p in env1) {"
+ " if (p == 'prop') return false;"
+ " }"
+ " return true;"
+ " } catch (e) {"
+ " return false;"
+ " }"
+ "})()");
+ CHECK(result->IsTrue());
+ }
+}
+
+
+THREADED_TEST(CrossDomainForInOnPrototype) {
LocalContext env1;
v8::HandleScope handle_scope(env1->GetIsolate());
v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
@@ -8668,7 +8696,7 @@ TEST(AccessControlES5) {
global1->Set(v8_str("other"), global0);
// Regression test for issue 1154.
- CHECK(CompileRun("Object.keys(other)").IsEmpty());
+ CHECK(CompileRun("Object.keys(other).length == 0")->BooleanValue());
CHECK(CompileRun("other.blocked_prop").IsEmpty());
// Regression test for issue 1027.
@@ -8719,6 +8747,11 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
obj_template->Set(v8_str("x"), v8::Integer::New(isolate, 42));
obj_template->SetAccessCheckCallbacks(AccessAlwaysBlocked, NULL);
+ // Add an accessor accessible by cross-domain JS code.
+ obj_template->SetAccessor(
+ v8_str("accessible_prop"), EchoGetter, EchoSetter, v8::Handle<Value>(),
+ v8::AccessControl(v8::ALL_CAN_READ | v8::ALL_CAN_WRITE));
+
// Create an environment
v8::Local<Context> context0 = Context::New(isolate, NULL, obj_template);
context0->Enter();
@@ -8741,11 +8774,15 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
// global object should be blocked by access checks on the global
// proxy object. Accessing the object that requires access checks
// is blocked by the access checks on the object itself.
- value = CompileRun("Object.getOwnPropertyNames(other).length == 0");
- CHECK(value.IsEmpty());
+ value = CompileRun(
+ "var names = Object.getOwnPropertyNames(other);"
+ "names.length == 1 && names[0] == 'accessible_prop';");
+ CHECK(value->BooleanValue());
- value = CompileRun("Object.getOwnPropertyNames(object).length == 0");
- CHECK(value.IsEmpty());
+ value = CompileRun(
+ "var names = Object.getOwnPropertyNames(object);"
+ "names.length == 1 && names[0] == 'accessible_prop';");
+ CHECK(value->BooleanValue());
context1->Exit();
context0->Exit();
@@ -11660,7 +11697,8 @@ static int GetGlobalObjectsCount() {
i::HeapIterator it(CcTest::heap());
for (i::HeapObject* object = it.next(); object != NULL; object = it.next())
if (object->IsJSGlobalObject()) count++;
- return count;
+ // Subtract one to compensate for the code stub context that is always present
+ return count - 1;
}
@@ -11869,6 +11907,7 @@ void HandleCreatingCallback1(
THREADED_TEST(NoGlobalHandlesOrphaningDueToWeakCallback) {
+ v8::Locker locker(CcTest::isolate());
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
@@ -11884,6 +11923,7 @@ THREADED_TEST(NoGlobalHandlesOrphaningDueToWeakCallback) {
handle3.SetWeak(&handle3, HandleCreatingCallback1,
v8::WeakCallbackType::kParameter);
CcTest::heap()->CollectAllGarbage();
+ EmptyMessageQueues(isolate);
}
@@ -12534,6 +12574,17 @@ THREADED_TEST(ExternalAllocatedMemory) {
}
+TEST(Regress51719) {
+ i::FLAG_incremental_marking = false;
+ CcTest::InitializeVM();
+
+ const int64_t kTriggerGCSize =
+ v8::internal::Internals::kExternalAllocationLimit + 1;
+ v8::Isolate* isolate = CcTest::isolate();
+ isolate->AdjustAmountOfExternalAllocatedMemory(kTriggerGCSize);
+}
+
+
// Regression test for issue 54, object templates with internal fields
// but no accessors or interceptors did not get their internal field
// count set on instances.
@@ -14094,58 +14145,58 @@ void TypedArrayTestHelper(i::ExternalArrayType array_type, int64_t low,
THREADED_TEST(Uint8Array) {
- TypedArrayTestHelper<uint8_t, v8::Uint8Array, i::ExternalUint8Array,
+ TypedArrayTestHelper<uint8_t, v8::Uint8Array, i::FixedUint8Array,
v8::ArrayBuffer>(i::kExternalUint8Array, 0, 0xFF);
}
THREADED_TEST(Int8Array) {
- TypedArrayTestHelper<int8_t, v8::Int8Array, i::ExternalInt8Array,
+ TypedArrayTestHelper<int8_t, v8::Int8Array, i::FixedInt8Array,
v8::ArrayBuffer>(i::kExternalInt8Array, -0x80, 0x7F);
}
THREADED_TEST(Uint16Array) {
- TypedArrayTestHelper<uint16_t, v8::Uint16Array, i::ExternalUint16Array,
+ TypedArrayTestHelper<uint16_t, v8::Uint16Array, i::FixedUint16Array,
v8::ArrayBuffer>(i::kExternalUint16Array, 0, 0xFFFF);
}
THREADED_TEST(Int16Array) {
- TypedArrayTestHelper<int16_t, v8::Int16Array, i::ExternalInt16Array,
+ TypedArrayTestHelper<int16_t, v8::Int16Array, i::FixedInt16Array,
v8::ArrayBuffer>(i::kExternalInt16Array, -0x8000,
0x7FFF);
}
THREADED_TEST(Uint32Array) {
- TypedArrayTestHelper<uint32_t, v8::Uint32Array, i::ExternalUint32Array,
+ TypedArrayTestHelper<uint32_t, v8::Uint32Array, i::FixedUint32Array,
v8::ArrayBuffer>(i::kExternalUint32Array, 0, UINT_MAX);
}
THREADED_TEST(Int32Array) {
- TypedArrayTestHelper<int32_t, v8::Int32Array, i::ExternalInt32Array,
+ TypedArrayTestHelper<int32_t, v8::Int32Array, i::FixedInt32Array,
v8::ArrayBuffer>(i::kExternalInt32Array, INT_MIN,
INT_MAX);
}
THREADED_TEST(Float32Array) {
- TypedArrayTestHelper<float, v8::Float32Array, i::ExternalFloat32Array,
+ TypedArrayTestHelper<float, v8::Float32Array, i::FixedFloat32Array,
v8::ArrayBuffer>(i::kExternalFloat32Array, -500, 500);
}
THREADED_TEST(Float64Array) {
- TypedArrayTestHelper<double, v8::Float64Array, i::ExternalFloat64Array,
+ TypedArrayTestHelper<double, v8::Float64Array, i::FixedFloat64Array,
v8::ArrayBuffer>(i::kExternalFloat64Array, -500, 500);
}
THREADED_TEST(Uint8ClampedArray) {
TypedArrayTestHelper<uint8_t, v8::Uint8ClampedArray,
- i::ExternalUint8ClampedArray, v8::ArrayBuffer>(
+ i::FixedUint8ClampedArray, v8::ArrayBuffer>(
i::kExternalUint8ClampedArray, 0, 0xFF);
}
@@ -14169,38 +14220,16 @@ THREADED_TEST(DataView) {
}
-THREADED_TEST(SkipArrayBufferBackingStoreDuringGC) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- // Make sure the pointer looks like a heap object
- uint8_t* store_ptr = reinterpret_cast<uint8_t*>(i::kHeapObjectTag);
-
- // Create ArrayBuffer with pointer-that-cannot-be-visited in the backing store
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, store_ptr, 8);
-
- // Should not crash
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
-
- // Should not move the pointer
- CHECK_EQ(ab->GetContents().Data(), store_ptr);
-}
-
-
THREADED_TEST(SharedUint8Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<uint8_t, v8::Uint8Array, i::ExternalUint8Array,
+ TypedArrayTestHelper<uint8_t, v8::Uint8Array, i::FixedUint8Array,
v8::SharedArrayBuffer>(i::kExternalUint8Array, 0, 0xFF);
}
THREADED_TEST(SharedInt8Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<int8_t, v8::Int8Array, i::ExternalInt8Array,
+ TypedArrayTestHelper<int8_t, v8::Int8Array, i::FixedInt8Array,
v8::SharedArrayBuffer>(i::kExternalInt8Array, -0x80,
0x7F);
}
@@ -14208,7 +14237,7 @@ THREADED_TEST(SharedInt8Array) {
THREADED_TEST(SharedUint16Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<uint16_t, v8::Uint16Array, i::ExternalUint16Array,
+ TypedArrayTestHelper<uint16_t, v8::Uint16Array, i::FixedUint16Array,
v8::SharedArrayBuffer>(i::kExternalUint16Array, 0,
0xFFFF);
}
@@ -14216,7 +14245,7 @@ THREADED_TEST(SharedUint16Array) {
THREADED_TEST(SharedInt16Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<int16_t, v8::Int16Array, i::ExternalInt16Array,
+ TypedArrayTestHelper<int16_t, v8::Int16Array, i::FixedInt16Array,
v8::SharedArrayBuffer>(i::kExternalInt16Array, -0x8000,
0x7FFF);
}
@@ -14224,7 +14253,7 @@ THREADED_TEST(SharedInt16Array) {
THREADED_TEST(SharedUint32Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<uint32_t, v8::Uint32Array, i::ExternalUint32Array,
+ TypedArrayTestHelper<uint32_t, v8::Uint32Array, i::FixedUint32Array,
v8::SharedArrayBuffer>(i::kExternalUint32Array, 0,
UINT_MAX);
}
@@ -14232,7 +14261,7 @@ THREADED_TEST(SharedUint32Array) {
THREADED_TEST(SharedInt32Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<int32_t, v8::Int32Array, i::ExternalInt32Array,
+ TypedArrayTestHelper<int32_t, v8::Int32Array, i::FixedInt32Array,
v8::SharedArrayBuffer>(i::kExternalInt32Array, INT_MIN,
INT_MAX);
}
@@ -14240,7 +14269,7 @@ THREADED_TEST(SharedInt32Array) {
THREADED_TEST(SharedFloat32Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<float, v8::Float32Array, i::ExternalFloat32Array,
+ TypedArrayTestHelper<float, v8::Float32Array, i::FixedFloat32Array,
v8::SharedArrayBuffer>(i::kExternalFloat32Array, -500,
500);
}
@@ -14248,7 +14277,7 @@ THREADED_TEST(SharedFloat32Array) {
THREADED_TEST(SharedFloat64Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<double, v8::Float64Array, i::ExternalFloat64Array,
+ TypedArrayTestHelper<double, v8::Float64Array, i::FixedFloat64Array,
v8::SharedArrayBuffer>(i::kExternalFloat64Array, -500,
500);
}
@@ -14257,7 +14286,7 @@ THREADED_TEST(SharedFloat64Array) {
THREADED_TEST(SharedUint8ClampedArray) {
i::FLAG_harmony_sharedarraybuffer = true;
TypedArrayTestHelper<uint8_t, v8::Uint8ClampedArray,
- i::ExternalUint8ClampedArray, v8::SharedArrayBuffer>(
+ i::FixedUint8ClampedArray, v8::SharedArrayBuffer>(
i::kExternalUint8ClampedArray, 0, 0xFF);
}
@@ -16532,118 +16561,6 @@ TEST(GCCallbacks) {
}
-THREADED_TEST(AddToJSFunctionResultCache) {
- i::FLAG_stress_compaction = false;
- i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(CcTest::isolate());
-
- LocalContext context;
-
- const char* code =
- "(function() {"
- " var key0 = 'a';"
- " var key1 = 'b';"
- " var r0 = %_GetFromCache(0, key0);"
- " var r1 = %_GetFromCache(0, key1);"
- " var r0_ = %_GetFromCache(0, key0);"
- " if (r0 !== r0_)"
- " return 'Different results for ' + key0 + ': ' + r0 + ' vs. ' + r0_;"
- " var r1_ = %_GetFromCache(0, key1);"
- " if (r1 !== r1_)"
- " return 'Different results for ' + key1 + ': ' + r1 + ' vs. ' + r1_;"
- " return 'PASSED';"
- "})()";
- CcTest::heap()->ClearJSFunctionResultCaches();
- ExpectString(code, "PASSED");
-}
-
-
-THREADED_TEST(FillJSFunctionResultCache) {
- i::FLAG_allow_natives_syntax = true;
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
-
- const char* code =
- "(function() {"
- " var k = 'a';"
- " var r = %_GetFromCache(0, k);"
- " for (var i = 0; i < 16; i++) {"
- " %_GetFromCache(0, 'a' + i);"
- " };"
- " if (r === %_GetFromCache(0, k))"
- " return 'FAILED: k0CacheSize is too small';"
- " return 'PASSED';"
- "})()";
- CcTest::heap()->ClearJSFunctionResultCaches();
- ExpectString(code, "PASSED");
-}
-
-
-THREADED_TEST(RoundRobinGetFromCache) {
- i::FLAG_allow_natives_syntax = true;
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
-
- const char* code =
- "(function() {"
- " var keys = [];"
- " for (var i = 0; i < 16; i++) keys.push(i);"
- " var values = [];"
- " for (var i = 0; i < 16; i++) values[i] = %_GetFromCache(0, keys[i]);"
- " for (var i = 0; i < 16; i++) {"
- " var v = %_GetFromCache(0, keys[i]);"
- " if (v.toString() !== values[i].toString())"
- " return 'Wrong value for ' + "
- " keys[i] + ': ' + v + ' vs. ' + values[i];"
- " };"
- " return 'PASSED';"
- "})()";
- CcTest::heap()->ClearJSFunctionResultCaches();
- ExpectString(code, "PASSED");
-}
-
-
-THREADED_TEST(ReverseGetFromCache) {
- i::FLAG_allow_natives_syntax = true;
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
-
- const char* code =
- "(function() {"
- " var keys = [];"
- " for (var i = 0; i < 16; i++) keys.push(i);"
- " var values = [];"
- " for (var i = 0; i < 16; i++) values[i] = %_GetFromCache(0, keys[i]);"
- " for (var i = 15; i >= 16; i--) {"
- " var v = %_GetFromCache(0, keys[i]);"
- " if (v !== values[i])"
- " return 'Wrong value for ' + "
- " keys[i] + ': ' + v + ' vs. ' + values[i];"
- " };"
- " return 'PASSED';"
- "})()";
- CcTest::heap()->ClearJSFunctionResultCaches();
- ExpectString(code, "PASSED");
-}
-
-
-THREADED_TEST(TestEviction) {
- i::FLAG_allow_natives_syntax = true;
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
-
- const char* code =
- "(function() {"
- " for (var i = 0; i < 2*16; i++) {"
- " %_GetFromCache(0, 'a' + i);"
- " };"
- " return 'PASSED';"
- "})()";
- CcTest::heap()->ClearJSFunctionResultCaches();
- ExpectString(code, "PASSED");
-}
-
-
THREADED_TEST(TwoByteStringInOneByteCons) {
// See Chromium issue 47824.
LocalContext context;
@@ -16741,8 +16658,8 @@ TEST(ContainsOnlyOneByte) {
const int length = 512;
// Ensure word aligned assignment.
const int aligned_length = length*sizeof(uintptr_t)/sizeof(uint16_t);
- i::SmartArrayPointer<uintptr_t>
- aligned_contents(new uintptr_t[aligned_length]);
+ v8::base::SmartArrayPointer<uintptr_t> aligned_contents(
+ new uintptr_t[aligned_length]);
uint16_t* string_contents =
reinterpret_cast<uint16_t*>(aligned_contents.get());
// Set to contain only one byte.
@@ -16994,8 +16911,6 @@ TEST(VerifyArrayPrototypeGuarantees) {
// Break fast array hole handling by prototype structure changes.
BreakArrayGuarantees("[].__proto__.__proto__ = { funny: true };");
// By sending elements to dictionary mode.
- BreakArrayGuarantees("Object.freeze(Array.prototype);");
- BreakArrayGuarantees("Object.freeze(Object.prototype);");
BreakArrayGuarantees(
"Object.defineProperty(Array.prototype, 0, {"
" get: function() { return 3; }});");
@@ -17235,10 +17150,12 @@ class InitDefaultIsolateThread : public v8::base::Thread {
void Run() {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ const intptr_t pageSizeMult =
+ v8::internal::Page::kPageSize / v8::internal::MB;
switch (testCase_) {
case SetResourceConstraints: {
- create_params.constraints.set_max_semi_space_size(1);
- create_params.constraints.set_max_old_space_size(4);
+ create_params.constraints.set_max_semi_space_size(1 * pageSizeMult);
+ create_params.constraints.set_max_old_space_size(4 * pageSizeMult);
break;
}
default:
@@ -18124,20 +18041,20 @@ THREADED_TEST(Regress93759) {
CHECK(result1->Equals(simple_object->GetPrototype()));
Local<Value> result2 = CompileRun("Object.getPrototypeOf(protected)");
- CHECK(result2.IsEmpty());
+ CHECK(result2->IsNull());
Local<Value> result3 = CompileRun("Object.getPrototypeOf(global)");
CHECK(result3->Equals(global_object->GetPrototype()));
Local<Value> result4 = CompileRun("Object.getPrototypeOf(proxy)");
- CHECK(result4.IsEmpty());
+ CHECK(result4->IsNull());
Local<Value> result5 = CompileRun("Object.getPrototypeOf(hidden)");
CHECK(result5->Equals(
object_with_hidden->GetPrototype()->ToObject(isolate)->GetPrototype()));
Local<Value> result6 = CompileRun("Object.getPrototypeOf(phidden)");
- CHECK(result6.IsEmpty());
+ CHECK(result6->IsNull());
}
@@ -19376,15 +19293,15 @@ TEST(AccessCheckThrows) {
CheckCorrectThrow("%GetProperty(other, 'x')");
CheckCorrectThrow("%SetProperty(other, 'x', 'foo', 0)");
CheckCorrectThrow("%AddNamedProperty(other, 'x', 'foo', 1)");
- CheckCorrectThrow("%DeleteProperty(other, 'x', 0)");
- CheckCorrectThrow("%DeleteProperty(other, '1', 0)");
+ CheckCorrectThrow("%DeleteProperty_Sloppy(other, 'x')");
+ CheckCorrectThrow("%DeleteProperty_Strict(other, 'x')");
+ CheckCorrectThrow("%DeleteProperty_Sloppy(other, '1')");
+ CheckCorrectThrow("%DeleteProperty_Strict(other, '1')");
CheckCorrectThrow("%HasOwnProperty(other, 'x')");
CheckCorrectThrow("%HasProperty(other, 'x')");
CheckCorrectThrow("%HasElement(other, 1)");
CheckCorrectThrow("%IsPropertyEnumerable(other, 'x')");
- CheckCorrectThrow("%GetPropertyNames(other)");
// PROPERTY_ATTRIBUTES_NONE = 0
- CheckCorrectThrow("%GetOwnPropertyNames(other, 0)");
CheckCorrectThrow("%DefineAccessorPropertyUnchecked("
"other, 'x', null, null, 1)");
@@ -20961,43 +20878,50 @@ TEST(StreamingProducesParserCache) {
}
-TEST(StreamingWithDebuggingDoesNotProduceParserCache) {
- // If the debugger is active, we should just not produce parser cache at
- // all. This is a regeression test: We used to produce a parser cache without
- // any data in it (just headers).
+TEST(StreamingWithDebuggingEnabledLate) {
+ // The streaming parser can only parse lazily, i.e. inner functions are not
+ // fully parsed. However, we may compile inner functions eagerly when
+ // debugging. Make sure that we can deal with this when turning on debugging
+ // after streaming parser has already finished parsing.
i::FLAG_min_preparse_length = 0;
- const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo(); ",
+ const char* chunks[] = {"with({x:1}) {",
+ " var foo = function foo(y) {",
+ " return x + y;",
+ " };",
+ " foo(2);",
+ "}",
NULL};
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
-
- // Make the debugger active by setting a breakpoint.
- CompileRun("function break_here() { }");
- i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(
- v8::Utils::OpenHandle(*env->Global()->Get(v8_str("break_here"))));
- EnableDebugger();
- v8::internal::Debug* debug = CcTest::i_isolate()->debug();
- int position = 0;
- debug->SetBreakPoint(func, i::Handle<i::Object>(v8::internal::Smi::FromInt(1),
- CcTest::i_isolate()),
- &position);
+ v8::TryCatch try_catch(isolate);
v8::ScriptCompiler::StreamedSource source(
new TestSourceStream(chunks),
v8::ScriptCompiler::StreamedSource::ONE_BYTE);
v8::ScriptCompiler::ScriptStreamingTask* task =
- v8::ScriptCompiler::StartStreamingScript(
- isolate, &source, v8::ScriptCompiler::kProduceParserCache);
+ v8::ScriptCompiler::StartStreamingScript(isolate, &source);
- // TestSourceStream::GetMoreData won't block, so it's OK to just run the
- // task here in the main thread.
task->Run();
delete task;
- // Check that we got no cached data.
- CHECK(source.GetCachedData() == NULL);
+ CHECK(!try_catch.HasCaught());
+
+ v8::ScriptOrigin origin(v8_str("http://foo.com"));
+ char* full_source = TestSourceStream::FullSourceString(chunks);
+
+ EnableDebugger();
+
+ v8::Handle<Script> script = v8::ScriptCompiler::Compile(
+ isolate, &source, v8_str(full_source), origin);
+
+ Maybe<uint32_t> result =
+ script->Run(env.local()).ToLocalChecked()->Uint32Value(env.local());
+ CHECK_EQ(3U, result.FromMaybe(0));
+
+ delete[] full_source;
+
DisableDebugger();
}
@@ -21256,11 +21180,7 @@ TEST(TurboAsmDisablesNeuter) {
v8::V8::Initialize();
v8::HandleScope scope(CcTest::isolate());
LocalContext context;
-#if V8_TURBOFAN_TARGET
bool should_be_neuterable = !i::FLAG_turbo_asm;
-#else
- bool should_be_neuterable = true;
-#endif
const char* load =
"function Module(stdlib, foreign, heap) {"
" 'use asm';"
@@ -21305,14 +21225,10 @@ TEST(GetPrototypeAccessControl) {
env->Global()->Set(v8_str("prohibited"), obj_template->NewInstance());
- {
- v8::TryCatch try_catch(isolate);
- CompileRun(
- "function f() { %_GetPrototype(prohibited); }"
- "%OptimizeFunctionOnNextCall(f);"
- "f();");
- CHECK(try_catch.HasCaught());
- }
+ CHECK(CompileRun(
+ "function f() { return %_GetPrototype(prohibited); }"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f();")->IsNull());
}
@@ -21714,19 +21630,19 @@ TEST(ExtrasExportsObject) {
// standalone.gypi ensures we include the test-extra.js file, which should
// export the tested functions.
- v8::Local<v8::Object> exports = env->GetExtrasExportsObject();
+ v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
auto func =
- exports->Get(v8_str("testExtraShouldReturnFive")).As<v8::Function>();
+ binding->Get(v8_str("testExtraShouldReturnFive")).As<v8::Function>();
auto undefined = v8::Undefined(isolate);
auto result = func->Call(undefined, 0, {}).As<v8::Number>();
CHECK_EQ(5, result->Int32Value());
v8::Handle<v8::FunctionTemplate> runtimeFunction =
v8::FunctionTemplate::New(isolate, ExtrasExportsTestRuntimeFunction);
- exports->Set(v8_str("runtime"), runtimeFunction->GetFunction());
+ binding->Set(v8_str("runtime"), runtimeFunction->GetFunction());
func =
- exports->Get(v8_str("testExtraShouldCallToRuntime")).As<v8::Function>();
+ binding->Get(v8_str("testExtraShouldCallToRuntime")).As<v8::Function>();
result = func->Call(undefined, 0, {}).As<v8::Number>();
CHECK_EQ(7, result->Int32Value());
}
@@ -21755,7 +21671,6 @@ TEST(Map) {
CHECK_EQ(3, contents->Get(2).As<v8::Int32>()->Value());
CHECK_EQ(4, contents->Get(3).As<v8::Int32>()->Value());
- map = v8::Map::FromArray(env.local(), contents).ToLocalChecked();
CHECK_EQ(2U, map->Size());
CHECK(map->Has(env.local(), v8::Integer::New(isolate, 1)).FromJust());
@@ -21789,16 +21704,6 @@ TEST(Map) {
}
-TEST(MapFromArrayOddLength) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- LocalContext env;
- // Odd lengths result in a null MaybeLocal.
- Local<v8::Array> contents = v8::Array::New(isolate, 41);
- CHECK(v8::Map::FromArray(env.local(), contents).IsEmpty());
-}
-
-
TEST(Set) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
@@ -21820,7 +21725,6 @@ TEST(Set) {
CHECK_EQ(1, keys->Get(0).As<v8::Int32>()->Value());
CHECK_EQ(2, keys->Get(1).As<v8::Int32>()->Value());
- set = v8::Set::FromArray(env.local(), keys).ToLocalChecked();
CHECK_EQ(2U, set->Size());
CHECK(set->Has(env.local(), v8::Integer::New(isolate, 1)).FromJust());
@@ -21881,33 +21785,45 @@ TEST(CompatibleReceiverCheckOnCachedICHandler) {
0);
}
+class FutexInterruptionThread : public v8::base::Thread {
+ public:
+ explicit FutexInterruptionThread(v8::Isolate* isolate)
+ : Thread(Options("FutexInterruptionThread")), isolate_(isolate) {}
-static int nb_uncaught_exception_callback_calls = 0;
-
+ virtual void Run() {
+ // Wait a bit before terminating.
+ v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(100));
+ v8::V8::TerminateExecution(isolate_);
+ }
-bool NoAbortOnUncaughtException(v8::Isolate* isolate) {
- ++nb_uncaught_exception_callback_calls;
- return false;
-}
+ private:
+ v8::Isolate* isolate_;
+};
-TEST(AbortOnUncaughtExceptionNoAbort) {
+TEST(FutexInterruption) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ i::FLAG_harmony_atomics = true;
v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template =
- v8::ObjectTemplate::New(isolate);
- LocalContext env(NULL, global_template);
+ v8::HandleScope scope(isolate);
+ LocalContext env;
- i::FLAG_abort_on_uncaught_exception = true;
- isolate->SetAbortOnUncaughtExceptionCallback(NoAbortOnUncaughtException);
+ FutexInterruptionThread timeout_thread(isolate);
- CompileRun("function boom() { throw new Error(\"boom\") }");
+ v8::TryCatch try_catch(CcTest::isolate());
+ timeout_thread.Start();
- v8::Local<v8::Object> global_object = env->Global();
- v8::Local<v8::Function> foo =
- v8::Local<v8::Function>::Cast(global_object->Get(v8_str("boom")));
+ CompileRun(
+ "var ab = new SharedArrayBuffer(4);"
+ "var i32a = new Int32Array(ab);"
+ "Atomics.futexWait(i32a, 0, 0);");
+ CHECK(try_catch.HasTerminated());
+}
- foo->Call(global_object, 0, NULL);
- CHECK_EQ(1, nb_uncaught_exception_callback_calls);
+TEST(EstimatedContextSize) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+ CHECK(50000 < env->EstimatedSize());
}
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 0500bb3c1a..3c2f840058 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -2970,61 +2970,6 @@ TEST(ldp_stp_offset_wide) {
}
-TEST(ldnp_stnp_offset) {
- INIT_V8();
- SETUP();
-
- uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
- 0xffeeddccbbaa9988UL};
- uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
- uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
- uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
-
- START();
- __ Mov(x16, src_base);
- __ Mov(x17, dst_base);
- __ Mov(x18, src_base + 24);
- __ Mov(x19, dst_base + 56);
- __ Ldnp(w0, w1, MemOperand(x16));
- __ Ldnp(w2, w3, MemOperand(x16, 4));
- __ Ldnp(x4, x5, MemOperand(x16, 8));
- __ Ldnp(w6, w7, MemOperand(x18, -12));
- __ Ldnp(x8, x9, MemOperand(x18, -16));
- __ Stnp(w0, w1, MemOperand(x17));
- __ Stnp(w2, w3, MemOperand(x17, 8));
- __ Stnp(x4, x5, MemOperand(x17, 16));
- __ Stnp(w6, w7, MemOperand(x19, -24));
- __ Stnp(x8, x9, MemOperand(x19, -16));
- END();
-
- RUN();
-
- CHECK_EQUAL_64(0x44556677, x0);
- CHECK_EQUAL_64(0x00112233, x1);
- CHECK_EQUAL_64(0x0011223344556677UL, dst[0]);
- CHECK_EQUAL_64(0x00112233, x2);
- CHECK_EQUAL_64(0xccddeeff, x3);
- CHECK_EQUAL_64(0xccddeeff00112233UL, dst[1]);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
- CHECK_EQUAL_64(0x8899aabb, x6);
- CHECK_EQUAL_64(0xbbaa9988, x7);
- CHECK_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, x8);
- CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x9);
- CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
- CHECK_EQUAL_64(src_base, x16);
- CHECK_EQUAL_64(dst_base, x17);
- CHECK_EQUAL_64(src_base + 24, x18);
- CHECK_EQUAL_64(dst_base + 56, x19);
-
- TEARDOWN();
-}
-
-
TEST(ldp_stp_preindex) {
INIT_V8();
SETUP();
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 13abbbb447..63c9172f56 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -1458,18 +1458,18 @@ TEST(min_max) {
} TestFloat;
TestFloat test;
- const double dblNaN = std::numeric_limits<double>::quiet_NaN();
- const float fltNaN = std::numeric_limits<float>::quiet_NaN();
- const int tableLength = 5;
- double inputsa[tableLength] = {2.0, 3.0, dblNaN, 3.0, dblNaN};
- double inputsb[tableLength] = {3.0, 2.0, 3.0, dblNaN, dblNaN};
- double outputsdmin[tableLength] = {2.0, 2.0, 3.0, 3.0, dblNaN};
- double outputsdmax[tableLength] = {3.0, 3.0, 3.0, 3.0, dblNaN};
-
- float inputse[tableLength] = {2.0, 3.0, fltNaN, 3.0, fltNaN};
- float inputsf[tableLength] = {3.0, 2.0, 3.0, fltNaN, fltNaN};
- float outputsfmin[tableLength] = {2.0, 2.0, 3.0, 3.0, fltNaN};
- float outputsfmax[tableLength] = {3.0, 3.0, 3.0, 3.0, fltNaN};
+ const double double_nan = std::numeric_limits<double>::quiet_NaN();
+ const float float_nan = std::numeric_limits<float>::quiet_NaN();
+ const int kTableLength = 5;
+ double inputsa[kTableLength] = {2.0, 3.0, double_nan, 3.0, double_nan};
+ double inputsb[kTableLength] = {3.0, 2.0, 3.0, double_nan, double_nan};
+ double outputsdmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, double_nan};
+ double outputsdmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, double_nan};
+
+ float inputse[kTableLength] = {2.0, 3.0, float_nan, 3.0, float_nan};
+ float inputsf[kTableLength] = {3.0, 2.0, 3.0, float_nan, float_nan};
+ float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, float_nan};
+ float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, float_nan};
__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
@@ -1491,7 +1491,7 @@ TEST(min_max) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
test.e = inputse[i];
@@ -1499,7 +1499,7 @@ TEST(min_max) {
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- if (i < tableLength - 1) {
+ if (i < kTableLength - 1) {
CHECK_EQ(test.c, outputsdmin[i]);
CHECK_EQ(test.d, outputsdmax[i]);
CHECK_EQ(test.g, outputsfmin[i]);
@@ -1517,7 +1517,7 @@ TEST(min_max) {
TEST(rint_d) {
if (IsMipsArchVariant(kMips32r6)) {
- const int tableLength = 30;
+ const int kTableLength = 30;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1530,7 +1530,7 @@ TEST(rint_d) {
}TestFloat;
TestFloat test;
- double inputs[tableLength] = {18446744073709551617.0,
+ double inputs[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
1.7976931348623157E+308, 6.27463370218383111104242366943E-307,
@@ -1542,7 +1542,7 @@ TEST(rint_d) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- double outputs_RN[tableLength] = {18446744073709551617.0,
+ double outputs_RN[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
1.7976931348623157E308, 0,
@@ -1554,7 +1554,7 @@ TEST(rint_d) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- double outputs_RZ[tableLength] = {18446744073709551617.0,
+ double outputs_RZ[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
1.7976931348623157E308, 0,
@@ -1566,7 +1566,7 @@ TEST(rint_d) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- double outputs_RP[tableLength] = {18446744073709551617.0,
+ double outputs_RP[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
1.7976931348623157E308, 1,
@@ -1578,7 +1578,7 @@ TEST(rint_d) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- double outputs_RM[tableLength] = {18446744073709551617.0,
+ double outputs_RM[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
1.7976931348623157E308, 0,
@@ -1611,7 +1611,7 @@ TEST(rint_d) {
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
@@ -1696,7 +1696,7 @@ TEST(sel) {
TEST(rint_s) {
if (IsMipsArchVariant(kMips32r6)) {
- const int tableLength = 30;
+ const int kTableLength = 30;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1709,7 +1709,7 @@ TEST(rint_s) {
}TestFloat;
TestFloat test;
- float inputs[tableLength] = {18446744073709551617.0,
+ float inputs[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
1.7976931348623157E+38, 6.27463370218383111104242366943E-37,
@@ -1721,7 +1721,7 @@ TEST(rint_s) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- float outputs_RN[tableLength] = {18446744073709551617.0,
+ float outputs_RN[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
1.7976931348623157E38, 0,
@@ -1733,7 +1733,7 @@ TEST(rint_s) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- float outputs_RZ[tableLength] = {18446744073709551617.0,
+ float outputs_RZ[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
1.7976931348623157E38, 0,
@@ -1745,7 +1745,7 @@ TEST(rint_s) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- float outputs_RP[tableLength] = {18446744073709551617.0,
+ float outputs_RP[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
1.7976931348623157E38, 1,
@@ -1757,7 +1757,7 @@ TEST(rint_s) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- float outputs_RM[tableLength] = {18446744073709551617.0,
+ float outputs_RM[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
1.7976931348623157E38, 0,
@@ -1790,7 +1790,7 @@ TEST(rint_s) {
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
@@ -1802,11 +1802,13 @@ TEST(rint_s) {
TEST(mina_maxa) {
if (IsMipsArchVariant(kMips32r6)) {
- const int tableLength = 12;
+ const int kTableLength = 15;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assm(isolate, NULL, 0);
+ const double double_nan = std::numeric_limits<double>::quiet_NaN();
+ const float float_nan = std::numeric_limits<float>::quiet_NaN();
typedef struct test_float {
double a;
@@ -1820,53 +1822,37 @@ TEST(mina_maxa) {
}TestFloat;
TestFloat test;
- double inputsa[tableLength] = {
- 5.3, 4.8, 6.1,
- 9.8, 9.8, 9.8,
- -10.0, -8.9, -9.8,
- -10.0, -8.9, -9.8
+ double inputsa[kTableLength] = {
+ 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9,
+ -9.8, -10.0, -8.9, -9.8, double_nan, 3.0, double_nan
};
- double inputsb[tableLength] = {
- 4.8, 5.3, 6.1,
- -10.0, -8.9, -9.8,
- 9.8, 9.8, 9.8,
- -9.8, -11.2, -9.8
+ double inputsb[kTableLength] = {
+ 4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8,
+ 9.8, -9.8, -11.2, -9.8, 3.0, double_nan, double_nan
};
- double resd[tableLength] = {
- 4.8, 4.8, 6.1,
- 9.8, -8.9, 9.8,
- 9.8, -8.9, 9.8,
- -9.8, -8.9, -9.8
+ double resd[kTableLength] = {
+ 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9,
+ -9.8, -9.8, -8.9, -9.8, 3.0, 3.0, double_nan
};
- double resd1[tableLength] = {
- 5.3, 5.3, 6.1,
- -10.0, 9.8, 9.8,
- -10.0, 9.8, 9.8,
- -10.0, -11.2, -9.8
+ double resd1[kTableLength] = {
+ 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8,
+ 9.8, -10.0, -11.2, -9.8, 3.0, 3.0, double_nan
};
- float inputsc[tableLength] = {
- 5.3, 4.8, 6.1,
- 9.8, 9.8, 9.8,
- -10.0, -8.9, -9.8,
- -10.0, -8.9, -9.8
+ float inputsc[kTableLength] = {
+ 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9,
+ -9.8, -10.0, -8.9, -9.8, float_nan, 3.0, float_nan
};
- float inputsd[tableLength] = {
- 4.8, 5.3, 6.1,
- -10.0, -8.9, -9.8,
- 9.8, 9.8, 9.8,
- -9.8, -11.2, -9.8
+ float inputsd[kTableLength] = {
+ 4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8,
+ 9.8, -9.8, -11.2, -9.8, 3.0, float_nan, float_nan
};
- float resf[tableLength] = {
- 4.8, 4.8, 6.1,
- 9.8, -8.9, 9.8,
- 9.8, -8.9, 9.8,
- -9.8, -8.9, -9.8
+ float resf[kTableLength] = {
+ 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9,
+ -9.8, -9.8, -8.9, -9.8, 3.0, 3.0, float_nan
};
- float resf1[tableLength] = {
- 5.3, 5.3, 6.1,
- -10.0, 9.8, 9.8,
- -10.0, 9.8, 9.8,
- -10.0, -11.2, -9.8
+ float resf1[kTableLength] = {
+ 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8,
+ 9.8, -10.0, -11.2, -9.8, 3.0, 3.0, float_nan
};
__ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
@@ -1889,16 +1875,23 @@ TEST(mina_maxa) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
test.c = inputsc[i];
test.d = inputsd[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.resd, resd[i]);
- CHECK_EQ(test.resf, resf[i]);
- CHECK_EQ(test.resd1, resd1[i]);
- CHECK_EQ(test.resf1, resf1[i]);
+ if (i < kTableLength - 1) {
+ CHECK_EQ(test.resd, resd[i]);
+ CHECK_EQ(test.resf, resf[i]);
+ CHECK_EQ(test.resd1, resd1[i]);
+ CHECK_EQ(test.resf1, resf1[i]);
+ } else {
+ DCHECK(std::isnan(test.resd));
+ DCHECK(std::isnan(test.resf));
+ DCHECK(std::isnan(test.resd1));
+ DCHECK(std::isnan(test.resf1));
+ }
}
}
}
@@ -1918,22 +1911,22 @@ TEST(trunc_l) {
int64_t c; // a trunc result
int64_t d; // b trunc result
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483648.0, dFPU64InvalidResult,
@@ -1953,7 +1946,7 @@ TEST(trunc_l) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -1966,7 +1959,7 @@ TEST(trunc_l) {
TEST(movz_movn) {
if (IsMipsArchVariant(kMips32r2)) {
- const int tableLength = 4;
+ const int kTableLength = 4;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1987,17 +1980,17 @@ TEST(movz_movn) {
}TestFloat;
TestFloat test;
- double inputs_D[tableLength] = {
+ double inputs_D[kTableLength] = {
5.3, -5.3, 5.3, -2.9
};
- double inputs_S[tableLength] = {
+ double inputs_S[kTableLength] = {
4.8, 4.8, -4.8, -0.29
};
- float outputs_S[tableLength] = {
+ float outputs_S[kTableLength] = {
4.8, 4.8, -4.8, -0.29
};
- double outputs_D[tableLength] = {
+ double outputs_D[kTableLength] = {
5.3, -5.3, 5.3, -2.9
};
@@ -2029,7 +2022,7 @@ TEST(movz_movn) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.c = inputs_S[i];
@@ -2053,7 +2046,7 @@ TEST(movz_movn) {
TEST(movt_movd) {
if (IsMipsArchVariant(kMips32r2)) {
- const int tableLength = 4;
+ const int kTableLength = 4;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -2073,22 +2066,22 @@ TEST(movt_movd) {
}TestFloat;
TestFloat test;
- double inputs_D[tableLength] = {
+ double inputs_D[kTableLength] = {
5.3, -5.3, 20.8, -2.9
};
- double inputs_S[tableLength] = {
+ double inputs_S[kTableLength] = {
4.88, 4.8, -4.8, -0.29
};
- float outputs_S[tableLength] = {
+ float outputs_S[kTableLength] = {
4.88, 4.8, -4.8, -0.29
};
- double outputs_D[tableLength] = {
+ double outputs_D[kTableLength] = {
5.3, -5.3, 20.8, -2.9
};
int condition_flags[8] = {0, 1, 2, 3, 4, 5, 6, 7};
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.srcd = inputs_D[i];
test.srcf = inputs_S[i];
@@ -2160,8 +2153,8 @@ TEST(cvt_w_d) {
int32_t b;
int32_t fcsr;
}Test;
- const int tableLength = 24;
- double inputs[tableLength] = {
+ const int kTableLength = 24;
+ double inputs[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483637.0, 2147483638.0, 2147483639.0,
@@ -2169,28 +2162,28 @@ TEST(cvt_w_d) {
2147483643.0, 2147483644.0, 2147483645.0,
2147483646.0, 2147483647.0, 2147483653.0
};
- double outputs_RN[tableLength] = {
+ double outputs_RN[kTableLength] = {
2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
2147483637.0, 2147483638.0, 2147483639.0,
2147483640.0, 2147483641.0, 2147483642.0,
2147483643.0, 2147483644.0, 2147483645.0,
2147483646.0, 2147483647.0, kFPUInvalidResult};
- double outputs_RZ[tableLength] = {
+ double outputs_RZ[kTableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483637.0, 2147483638.0, 2147483639.0,
2147483640.0, 2147483641.0, 2147483642.0,
2147483643.0, 2147483644.0, 2147483645.0,
2147483646.0, 2147483647.0, kFPUInvalidResult};
- double outputs_RP[tableLength] = {
+ double outputs_RP[kTableLength] = {
3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483637.0, 2147483638.0, 2147483639.0,
2147483640.0, 2147483641.0, 2147483642.0,
2147483643.0, 2147483644.0, 2147483645.0,
2147483646.0, 2147483647.0, kFPUInvalidResult};
- double outputs_RM[tableLength] = {
+ double outputs_RM[kTableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
2147483637.0, 2147483638.0, 2147483639.0,
@@ -2217,7 +2210,7 @@ TEST(cvt_w_d) {
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
@@ -2238,22 +2231,22 @@ TEST(trunc_w) {
int32_t c; // a trunc result
int32_t d; // b trunc result
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
kFPUInvalidResult, kFPUInvalidResult,
@@ -2273,7 +2266,7 @@ TEST(trunc_w) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -2295,22 +2288,22 @@ TEST(round_w) {
int32_t c; // a trunc result
int32_t d; // b trunc result
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
kFPUInvalidResult, kFPUInvalidResult,
@@ -2330,7 +2323,7 @@ TEST(round_w) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -2353,22 +2346,22 @@ TEST(round_l) {
int64_t c;
int64_t d;
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
2147483648.0, dFPU64InvalidResult,
@@ -2388,7 +2381,7 @@ TEST(round_l) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -2400,7 +2393,7 @@ TEST(round_l) {
TEST(sub) {
- const int tableLength = 12;
+ const int kTableLength = 12;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -2416,27 +2409,27 @@ TEST(sub) {
}TestFloat;
TestFloat test;
- double inputfs_D[tableLength] = {
+ double inputfs_D[kTableLength] = {
5.3, 4.8, 2.9, -5.3, -4.8, -2.9,
5.3, 4.8, 2.9, -5.3, -4.8, -2.9
};
- double inputft_D[tableLength] = {
+ double inputft_D[kTableLength] = {
4.8, 5.3, 2.9, 4.8, 5.3, 2.9,
-4.8, -5.3, -2.9, -4.8, -5.3, -2.9
};
- double outputs_D[tableLength] = {
+ double outputs_D[kTableLength] = {
0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
10.1, 10.1, 5.8, -0.5, 0.5, 0.0
};
- float inputfs_S[tableLength] = {
+ float inputfs_S[kTableLength] = {
5.3, 4.8, 2.9, -5.3, -4.8, -2.9,
5.3, 4.8, 2.9, -5.3, -4.8, -2.9
};
- float inputft_S[tableLength] = {
+ float inputft_S[kTableLength] = {
4.8, 5.3, 2.9, 4.8, 5.3, 2.9,
-4.8, -5.3, -2.9, -4.8, -5.3, -2.9
};
- float outputs_S[tableLength] = {
+ float outputs_S[kTableLength] = {
0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
10.1, 10.1, 5.8, -0.5, 0.5, 0.0
};
@@ -2456,7 +2449,7 @@ TEST(sub) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
test.b = inputft_S[i];
test.c = inputfs_D[i];
@@ -2469,7 +2462,7 @@ TEST(sub) {
TEST(sqrt_rsqrt_recip) {
- const int tableLength = 4;
+ const int kTableLength = 4;
const double deltaDouble = 2E-15;
const float deltaFloat = 2E-7;
const float sqrt2_s = sqrt(2);
@@ -2491,18 +2484,18 @@ TEST(sqrt_rsqrt_recip) {
}TestFloat;
TestFloat test;
- double inputs_D[tableLength] = {
+ double inputs_D[kTableLength] = {
0.0L, 4.0L, 2.0L, 4e-28L
};
- double outputs_D[tableLength] = {
+ double outputs_D[kTableLength] = {
0.0L, 2.0L, sqrt2_d, 2e-14L
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
0.0, 4.0, 2.0, 4e-28
};
- float outputs_S[tableLength] = {
+ float outputs_S[kTableLength] = {
0.0, 2.0, sqrt2_s, 2e-14
};
@@ -2536,7 +2529,7 @@ TEST(sqrt_rsqrt_recip) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
float f1;
double d1;
test.a = inputs_S[i];
@@ -2573,7 +2566,7 @@ TEST(sqrt_rsqrt_recip) {
TEST(neg) {
- const int tableLength = 3;
+ const int kTableLength = 3;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -2587,18 +2580,18 @@ TEST(neg) {
}TestFloat;
TestFloat test;
- double inputs_D[tableLength] = {
+ double inputs_D[kTableLength] = {
0.0, 4.0, -2.0
};
- double outputs_D[tableLength] = {
+ double outputs_D[kTableLength] = {
0.0, -4.0, 2.0
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
0.0, 4.0, -2.0
};
- float outputs_S[tableLength] = {
+ float outputs_S[kTableLength] = {
0.0, -4.0, 2.0
};
__ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
@@ -2615,7 +2608,7 @@ TEST(neg) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
test.c = inputs_D[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -2626,7 +2619,7 @@ TEST(neg) {
TEST(mul) {
- const int tableLength = 4;
+ const int kTableLength = 4;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -2642,17 +2635,17 @@ TEST(mul) {
}TestFloat;
TestFloat test;
- double inputfs_D[tableLength] = {
+ double inputfs_D[kTableLength] = {
5.3, -5.3, 5.3, -2.9
};
- double inputft_D[tableLength] = {
+ double inputft_D[kTableLength] = {
4.8, 4.8, -4.8, -0.29
};
- float inputfs_S[tableLength] = {
+ float inputfs_S[kTableLength] = {
5.3, -5.3, 5.3, -2.9
};
- float inputft_S[tableLength] = {
+ float inputft_S[kTableLength] = {
4.8, 4.8, -4.8, -0.29
};
@@ -2672,7 +2665,7 @@ TEST(mul) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
test.b = inputft_S[i];
test.c = inputfs_D[i];
@@ -2685,7 +2678,7 @@ TEST(mul) {
TEST(mov) {
- const int tableLength = 4;
+ const int kTableLength = 4;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -2699,17 +2692,17 @@ TEST(mov) {
}TestFloat;
TestFloat test;
- double inputs_D[tableLength] = {
+ double inputs_D[kTableLength] = {
5.3, -5.3, 5.3, -2.9
};
- double inputs_S[tableLength] = {
+ double inputs_S[kTableLength] = {
4.8, 4.8, -4.8, -0.29
};
- float outputs_S[tableLength] = {
+ float outputs_S[kTableLength] = {
4.8, 4.8, -4.8, -0.29
};
- double outputs_D[tableLength] = {
+ double outputs_D[kTableLength] = {
5.3, -5.3, 5.3, -2.9
};
@@ -2727,7 +2720,7 @@ TEST(mov) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.c = inputs_S[i];
@@ -2750,22 +2743,22 @@ TEST(floor_w) {
int32_t c; // a floor result
int32_t d; // b floor result
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
kFPUInvalidResult, kFPUInvalidResult,
@@ -2785,7 +2778,7 @@ TEST(floor_w) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -2808,22 +2801,22 @@ TEST(floor_l) {
int64_t c;
int64_t d;
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
2147483648.0, dFPU64InvalidResult,
@@ -2843,7 +2836,7 @@ TEST(floor_l) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -2866,22 +2859,22 @@ TEST(ceil_w) {
int32_t c; // a floor result
int32_t d; // b floor result
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
kFPUInvalidResult, kFPUInvalidResult,
@@ -2901,7 +2894,7 @@ TEST(ceil_w) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -2924,22 +2917,22 @@ TEST(ceil_l) {
int64_t c;
int64_t d;
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483648.0, dFPU64InvalidResult,
@@ -2959,7 +2952,7 @@ TEST(ceil_l) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -5059,4 +5052,56 @@ TEST(r6_balc) {
}
+uint32_t run_bal(int16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ __ mov(t0, ra);  // Save return address; bal below overwrites ra.
+ __ bal(offset);  // Equivalent to "BGEZAL zero_reg, offset".
+ __ nop();  // Branch delay slot.
+
+ __ mov(ra, t0);  // Restore the caller's return address.
+ __ jr(ra);
+ __ nop();
+
+ __ li(v0, 1);  // Reached via bal: set result (v0) to 1.
+ __ jr(ra);  // ra was set by bal; returns to the instruction after it.
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+
+ uint32_t res =
+ reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+
+ return res;
+}
+
+
+TEST(bal) {
+ CcTest::InitializeVM();
+
+ struct TestCaseBal {
+ int16_t offset;  // Branch offset (in instructions) passed to bal.
+ uint32_t expected_res;  // Value the generated code should return in v0.
+ };
+
+ struct TestCaseBal tc[] = {
+ // offset, expected_res
+ { 4, 1 },
+ };
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBal);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ CHECK_EQ(tc[i].expected_res, run_bal(tc[i].offset));
+ }
+}
+
+
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index bb7b05ca76..00e364cfce 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -1440,7 +1440,7 @@ TEST(MIPS16) {
}
-// ----------------------mips32r6 specific tests----------------------
+// ----------------------mips64r6 specific tests----------------------
TEST(seleqz_selnez) {
if (kArchVariant == kMips64r6) {
CcTest::InitializeVM();
@@ -1562,18 +1562,18 @@ TEST(min_max) {
} TestFloat;
TestFloat test;
- const double dblNaN = std::numeric_limits<double>::quiet_NaN();
- const float fltNaN = std::numeric_limits<float>::quiet_NaN();
- const int tableLength = 5;
- double inputsa[tableLength] = {2.0, 3.0, dblNaN, 3.0, dblNaN};
- double inputsb[tableLength] = {3.0, 2.0, 3.0, dblNaN, dblNaN};
- double outputsdmin[tableLength] = {2.0, 2.0, 3.0, 3.0, dblNaN};
- double outputsdmax[tableLength] = {3.0, 3.0, 3.0, 3.0, dblNaN};
-
- float inputse[tableLength] = {2.0, 3.0, fltNaN, 3.0, fltNaN};
- float inputsf[tableLength] = {3.0, 2.0, 3.0, fltNaN, fltNaN};
- float outputsfmin[tableLength] = {2.0, 2.0, 3.0, 3.0, fltNaN};
- float outputsfmax[tableLength] = {3.0, 3.0, 3.0, 3.0, fltNaN};
+ const double double_nan = std::numeric_limits<double>::quiet_NaN();
+ const float float_nan = std::numeric_limits<float>::quiet_NaN();
+ const int kTableLength = 5;
+ double inputsa[kTableLength] = {2.0, 3.0, double_nan, 3.0, double_nan};
+ double inputsb[kTableLength] = {3.0, 2.0, 3.0, double_nan, double_nan};
+ double outputsdmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, double_nan};
+ double outputsdmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, double_nan};
+
+ float inputse[kTableLength] = {2.0, 3.0, float_nan, 3.0, float_nan};
+ float inputsf[kTableLength] = {3.0, 2.0, 3.0, float_nan, float_nan};
+ float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, float_nan};
+ float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, float_nan};
__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
@@ -1595,7 +1595,7 @@ TEST(min_max) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
test.e = inputse[i];
@@ -1603,7 +1603,7 @@ TEST(min_max) {
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- if (i < tableLength - 1) {
+ if (i < kTableLength - 1) {
CHECK_EQ(test.c, outputsdmin[i]);
CHECK_EQ(test.d, outputsdmax[i]);
CHECK_EQ(test.g, outputsfmin[i]);
@@ -1621,7 +1621,7 @@ TEST(min_max) {
TEST(rint_d) {
if (kArchVariant == kMips64r6) {
- const int tableLength = 30;
+ const int kTableLength = 30;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1634,7 +1634,7 @@ TEST(rint_d) {
}TestFloat;
TestFloat test;
- double inputs[tableLength] = {18446744073709551617.0,
+ double inputs[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
1.7976931348623157E+308, 6.27463370218383111104242366943E-307,
@@ -1646,7 +1646,7 @@ TEST(rint_d) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- double outputs_RN[tableLength] = {18446744073709551617.0,
+ double outputs_RN[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
1.7976931348623157E308, 0,
@@ -1658,7 +1658,7 @@ TEST(rint_d) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- double outputs_RZ[tableLength] = {18446744073709551617.0,
+ double outputs_RZ[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
1.7976931348623157E308, 0,
@@ -1670,7 +1670,7 @@ TEST(rint_d) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- double outputs_RP[tableLength] = {18446744073709551617.0,
+ double outputs_RP[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
1.7976931348623157E308, 1,
@@ -1682,7 +1682,7 @@ TEST(rint_d) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- double outputs_RM[tableLength] = {18446744073709551617.0,
+ double outputs_RM[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
1.7976931348623157E308, 0,
@@ -1713,7 +1713,7 @@ TEST(rint_d) {
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
@@ -1798,7 +1798,7 @@ TEST(sel) {
TEST(rint_s) {
if (kArchVariant == kMips64r6) {
- const int tableLength = 30;
+ const int kTableLength = 30;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1811,7 +1811,7 @@ TEST(rint_s) {
}TestFloat;
TestFloat test;
- float inputs[tableLength] = {18446744073709551617.0,
+ float inputs[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
1.7976931348623157E+38, 6.27463370218383111104242366943E-37,
@@ -1823,7 +1823,7 @@ TEST(rint_s) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- float outputs_RN[tableLength] = {18446744073709551617.0,
+ float outputs_RN[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
1.7976931348623157E38, 0,
@@ -1835,7 +1835,7 @@ TEST(rint_s) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- float outputs_RZ[tableLength] = {18446744073709551617.0,
+ float outputs_RZ[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
1.7976931348623157E38, 0,
@@ -1847,7 +1847,7 @@ TEST(rint_s) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- float outputs_RP[tableLength] = {18446744073709551617.0,
+ float outputs_RP[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
1.7976931348623157E38, 1,
@@ -1859,7 +1859,7 @@ TEST(rint_s) {
37778931862957161709582.0, 37778931862957161709583.0,
37778931862957161709584.0, 37778931862957161709585.0,
37778931862957161709586.0, 37778931862957161709587.0};
- float outputs_RM[tableLength] = {18446744073709551617.0,
+ float outputs_RM[kTableLength] = {18446744073709551617.0,
4503599627370496.0, -4503599627370496.0,
1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
1.7976931348623157E38, 0,
@@ -1892,7 +1892,7 @@ TEST(rint_s) {
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
@@ -1904,11 +1904,13 @@ TEST(rint_s) {
TEST(mina_maxa) {
if (kArchVariant == kMips64r6) {
- const int tableLength = 12;
+ const int kTableLength = 15;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assm(isolate, NULL, 0);
+ const double double_nan = std::numeric_limits<double>::quiet_NaN();
+ const float float_nan = std::numeric_limits<float>::quiet_NaN();
typedef struct test_float {
double a;
@@ -1922,53 +1924,37 @@ TEST(mina_maxa) {
}TestFloat;
TestFloat test;
- double inputsa[tableLength] = {
- 5.3, 4.8, 6.1,
- 9.8, 9.8, 9.8,
- -10.0, -8.9, -9.8,
- -10.0, -8.9, -9.8
+ double inputsa[kTableLength] = {
+ 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9,
+ -9.8, -10.0, -8.9, -9.8, double_nan, 3.0, double_nan
};
- double inputsb[tableLength] = {
- 4.8, 5.3, 6.1,
- -10.0, -8.9, -9.8,
- 9.8, 9.8, 9.8,
- -9.8, -11.2, -9.8
+ double inputsb[kTableLength] = {
+ 4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8,
+ 9.8, -9.8, -11.2, -9.8, 3.0, double_nan, double_nan
};
- double resd[tableLength] = {
- 4.8, 4.8, 6.1,
- 9.8, -8.9, 9.8,
- 9.8, -8.9, 9.8,
- -9.8, -8.9, -9.8
+ double resd[kTableLength] = {
+ 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9,
+ -9.8, -9.8, -8.9, -9.8, 3.0, 3.0, double_nan
};
- double resd1[tableLength] = {
- 5.3, 5.3, 6.1,
- -10.0, 9.8, 9.8,
- -10.0, 9.8, 9.8,
- -10.0, -11.2, -9.8
+ double resd1[kTableLength] = {
+ 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8,
+ 9.8, -10.0, -11.2, -9.8, 3.0, 3.0, double_nan
};
- float inputsc[tableLength] = {
- 5.3, 4.8, 6.1,
- 9.8, 9.8, 9.8,
- -10.0, -8.9, -9.8,
- -10.0, -8.9, -9.8
+ float inputsc[kTableLength] = {
+ 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9,
+ -9.8, -10.0, -8.9, -9.8, float_nan, 3.0, float_nan
};
- float inputsd[tableLength] = {
- 4.8, 5.3, 6.1,
- -10.0, -8.9, -9.8,
- 9.8, 9.8, 9.8,
- -9.8, -11.2, -9.8
+ float inputsd[kTableLength] = {
+ 4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8,
+ 9.8, -9.8, -11.2, -9.8, 3.0, float_nan, float_nan
};
- float resf[tableLength] = {
- 4.8, 4.8, 6.1,
- 9.8, -8.9, 9.8,
- 9.8, -8.9, 9.8,
- -9.8, -8.9, -9.8
+ float resf[kTableLength] = {
+ 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9,
+ -9.8, -9.8, -8.9, -9.8, 3.0, 3.0, float_nan
};
- float resf1[tableLength] = {
- 5.3, 5.3, 6.1,
- -10.0, 9.8, 9.8,
- -10.0, 9.8, 9.8,
- -10.0, -11.2, -9.8
+ float resf1[kTableLength] = {
+ 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8,
+ 9.8, -10.0, -11.2, -9.8, 3.0, 3.0, float_nan
};
__ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
@@ -1991,24 +1977,31 @@ TEST(mina_maxa) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
test.b = inputsb[i];
test.c = inputsc[i];
test.d = inputsd[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.resd, resd[i]);
- CHECK_EQ(test.resf, resf[i]);
- CHECK_EQ(test.resd1, resd1[i]);
- CHECK_EQ(test.resf1, resf1[i]);
+ if (i < kTableLength - 1) {
+ CHECK_EQ(test.resd, resd[i]);
+ CHECK_EQ(test.resf, resf[i]);
+ CHECK_EQ(test.resd1, resd1[i]);
+ CHECK_EQ(test.resf1, resf1[i]);
+ } else {
+ CHECK(std::isnan(test.resd));
+ CHECK(std::isnan(test.resf));
+ CHECK(std::isnan(test.resd1));
+ CHECK(std::isnan(test.resf1));
+ }
}
}
}
-// ----------------------mips32r2 specific tests----------------------
+// ----------------------mips64r2 specific tests----------------------
TEST(trunc_l) {
if (kArchVariant == kMips64r2) {
CcTest::InitializeVM();
@@ -2022,22 +2015,22 @@ TEST(trunc_l) {
int64_t c; // a trunc result
int64_t d; // b trunc result
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483648.0, dFPU64InvalidResult,
@@ -2057,7 +2050,7 @@ TEST(trunc_l) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -2070,7 +2063,7 @@ TEST(trunc_l) {
TEST(movz_movn) {
if (kArchVariant == kMips64r2) {
- const int tableLength = 4;
+ const int kTableLength = 4;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -2091,17 +2084,17 @@ TEST(movz_movn) {
}TestFloat;
TestFloat test;
- double inputs_D[tableLength] = {
+ double inputs_D[kTableLength] = {
5.3, -5.3, 5.3, -2.9
};
- double inputs_S[tableLength] = {
+ double inputs_S[kTableLength] = {
4.8, 4.8, -4.8, -0.29
};
- float outputs_S[tableLength] = {
+ float outputs_S[kTableLength] = {
4.8, 4.8, -4.8, -0.29
};
- double outputs_D[tableLength] = {
+ double outputs_D[kTableLength] = {
5.3, -5.3, 5.3, -2.9
};
@@ -2133,7 +2126,7 @@ TEST(movz_movn) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.c = inputs_S[i];
@@ -2157,7 +2150,7 @@ TEST(movz_movn) {
TEST(movt_movd) {
if (kArchVariant == kMips64r2) {
- const int tableLength = 4;
+ const int kTableLength = 4;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
typedef struct test_float {
@@ -2176,22 +2169,22 @@ TEST(movt_movd) {
}TestFloat;
TestFloat test;
- double inputs_D[tableLength] = {
+ double inputs_D[kTableLength] = {
5.3, -5.3, 20.8, -2.9
};
- double inputs_S[tableLength] = {
+ double inputs_S[kTableLength] = {
4.88, 4.8, -4.8, -0.29
};
- float outputs_S[tableLength] = {
+ float outputs_S[kTableLength] = {
4.88, 4.8, -4.8, -0.29
};
- double outputs_D[tableLength] = {
+ double outputs_D[kTableLength] = {
5.3, -5.3, 20.8, -2.9
};
int condition_flags[8] = {0, 1, 2, 3, 4, 5, 6, 7};
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.srcd = inputs_D[i];
test.srcf = inputs_S[i];
@@ -2264,8 +2257,8 @@ TEST(cvt_w_d) {
int32_t b;
int fcsr;
}Test;
- const int tableLength = 24;
- double inputs[tableLength] = {
+ const int kTableLength = 24;
+ double inputs[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483637.0, 2147483638.0, 2147483639.0,
@@ -2273,28 +2266,28 @@ TEST(cvt_w_d) {
2147483643.0, 2147483644.0, 2147483645.0,
2147483646.0, 2147483647.0, 2147483653.0
};
- double outputs_RN[tableLength] = {
+ double outputs_RN[kTableLength] = {
2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
2147483637.0, 2147483638.0, 2147483639.0,
2147483640.0, 2147483641.0, 2147483642.0,
2147483643.0, 2147483644.0, 2147483645.0,
2147483646.0, 2147483647.0, kFPUInvalidResult};
- double outputs_RZ[tableLength] = {
+ double outputs_RZ[kTableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483637.0, 2147483638.0, 2147483639.0,
2147483640.0, 2147483641.0, 2147483642.0,
2147483643.0, 2147483644.0, 2147483645.0,
2147483646.0, 2147483647.0, kFPUInvalidResult};
- double outputs_RP[tableLength] = {
+ double outputs_RP[kTableLength] = {
3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483637.0, 2147483638.0, 2147483639.0,
2147483640.0, 2147483641.0, 2147483642.0,
2147483643.0, 2147483644.0, 2147483645.0,
2147483646.0, 2147483647.0, kFPUInvalidResult};
- double outputs_RM[tableLength] = {
+ double outputs_RM[kTableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
2147483637.0, 2147483638.0, 2147483639.0,
@@ -2321,7 +2314,7 @@ TEST(cvt_w_d) {
F3 f = FUNCTION_CAST<F3>(code->entry());
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
@@ -2342,22 +2335,22 @@ TEST(trunc_w) {
int32_t c; // a trunc result
int32_t d; // b trunc result
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
kFPUInvalidResult, kFPUInvalidResult,
@@ -2377,7 +2370,7 @@ TEST(trunc_w) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -2399,22 +2392,22 @@ TEST(round_w) {
int32_t c; // a trunc result
int32_t d; // b trunc result
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
kFPUInvalidResult, kFPUInvalidResult,
@@ -2434,7 +2427,7 @@ TEST(round_w) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -2456,22 +2449,22 @@ TEST(round_l) {
int64_t c;
int64_t d;
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
2147483648.0, dFPU64InvalidResult,
@@ -2491,7 +2484,7 @@ TEST(round_l) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
std::cout<< i<< "\n";
@@ -2503,7 +2496,7 @@ TEST(round_l) {
TEST(sub) {
- const int tableLength = 12;
+ const int kTableLength = 12;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -2519,27 +2512,27 @@ TEST(sub) {
}TestFloat;
TestFloat test;
- double inputfs_D[tableLength] = {
+ double inputfs_D[kTableLength] = {
5.3, 4.8, 2.9, -5.3, -4.8, -2.9,
5.3, 4.8, 2.9, -5.3, -4.8, -2.9
};
- double inputft_D[tableLength] = {
+ double inputft_D[kTableLength] = {
4.8, 5.3, 2.9, 4.8, 5.3, 2.9,
-4.8, -5.3, -2.9, -4.8, -5.3, -2.9
};
- double outputs_D[tableLength] = {
+ double outputs_D[kTableLength] = {
0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
10.1, 10.1, 5.8, -0.5, 0.5, 0.0
};
- float inputfs_S[tableLength] = {
+ float inputfs_S[kTableLength] = {
5.3, 4.8, 2.9, -5.3, -4.8, -2.9,
5.3, 4.8, 2.9, -5.3, -4.8, -2.9
};
- float inputft_S[tableLength] = {
+ float inputft_S[kTableLength] = {
4.8, 5.3, 2.9, 4.8, 5.3, 2.9,
-4.8, -5.3, -2.9, -4.8, -5.3, -2.9
};
- float outputs_S[tableLength] = {
+ float outputs_S[kTableLength] = {
0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
10.1, 10.1, 5.8, -0.5, 0.5, 0.0
};
@@ -2559,7 +2552,7 @@ TEST(sub) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
test.b = inputft_S[i];
test.c = inputfs_D[i];
@@ -2572,7 +2565,7 @@ TEST(sub) {
TEST(sqrt_rsqrt_recip) {
- const int tableLength = 4;
+ const int kTableLength = 4;
const double deltaDouble = 2E-15;
const float deltaFloat = 2E-7;
const float sqrt2_s = sqrt(2);
@@ -2594,18 +2587,18 @@ TEST(sqrt_rsqrt_recip) {
}TestFloat;
TestFloat test;
- double inputs_D[tableLength] = {
+ double inputs_D[kTableLength] = {
0.0L, 4.0L, 2.0L, 4e-28L
};
- double outputs_D[tableLength] = {
+ double outputs_D[kTableLength] = {
0.0L, 2.0L, sqrt2_d, 2e-14L
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
0.0, 4.0, 2.0, 4e-28
};
- float outputs_S[tableLength] = {
+ float outputs_S[kTableLength] = {
0.0, 2.0, sqrt2_s, 2e-14
};
@@ -2633,7 +2626,7 @@ TEST(sqrt_rsqrt_recip) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
float f1;
double d1;
test.a = inputs_S[i];
@@ -2668,7 +2661,7 @@ TEST(sqrt_rsqrt_recip) {
TEST(neg) {
- const int tableLength = 2;
+ const int kTableLength = 2;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -2682,18 +2675,18 @@ TEST(neg) {
}TestFloat;
TestFloat test;
- double inputs_D[tableLength] = {
+ double inputs_D[kTableLength] = {
4.0, -2.0
};
- double outputs_D[tableLength] = {
+ double outputs_D[kTableLength] = {
-4.0, 2.0
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
4.0, -2.0
};
- float outputs_S[tableLength] = {
+ float outputs_S[kTableLength] = {
-4.0, 2.0
};
__ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
@@ -2710,7 +2703,7 @@ TEST(neg) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
test.c = inputs_D[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -2722,7 +2715,7 @@ TEST(neg) {
TEST(mul) {
- const int tableLength = 4;
+ const int kTableLength = 4;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -2738,17 +2731,17 @@ TEST(mul) {
}TestFloat;
TestFloat test;
- double inputfs_D[tableLength] = {
+ double inputfs_D[kTableLength] = {
5.3, -5.3, 5.3, -2.9
};
- double inputft_D[tableLength] = {
+ double inputft_D[kTableLength] = {
4.8, 4.8, -4.8, -0.29
};
- float inputfs_S[tableLength] = {
+ float inputfs_S[kTableLength] = {
5.3, -5.3, 5.3, -2.9
};
- float inputft_S[tableLength] = {
+ float inputft_S[kTableLength] = {
4.8, 4.8, -4.8, -0.29
};
@@ -2768,7 +2761,7 @@ TEST(mul) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
test.b = inputft_S[i];
test.c = inputfs_D[i];
@@ -2781,7 +2774,7 @@ TEST(mul) {
TEST(mov) {
- const int tableLength = 4;
+ const int kTableLength = 4;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -2795,17 +2788,17 @@ TEST(mov) {
}TestFloat;
TestFloat test;
- double inputs_D[tableLength] = {
+ double inputs_D[kTableLength] = {
5.3, -5.3, 5.3, -2.9
};
- double inputs_S[tableLength] = {
+ double inputs_S[kTableLength] = {
4.8, 4.8, -4.8, -0.29
};
- float outputs_S[tableLength] = {
+ float outputs_S[kTableLength] = {
4.8, 4.8, -4.8, -0.29
};
- double outputs_D[tableLength] = {
+ double outputs_D[kTableLength] = {
5.3, -5.3, 5.3, -2.9
};
@@ -2823,7 +2816,7 @@ TEST(mov) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.c = inputs_S[i];
@@ -2846,22 +2839,22 @@ TEST(floor_w) {
int32_t c; // a floor result
int32_t d; // b floor result
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
kFPUInvalidResult, kFPUInvalidResult,
@@ -2881,7 +2874,7 @@ TEST(floor_w) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -2903,22 +2896,22 @@ TEST(floor_l) {
int64_t c;
int64_t d;
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
2147483648.0, dFPU64InvalidResult,
@@ -2938,7 +2931,7 @@ TEST(floor_l) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -2960,22 +2953,22 @@ TEST(ceil_w) {
int32_t c; // a floor result
int32_t d; // b floor result
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
kFPUInvalidResult, kFPUInvalidResult,
@@ -2995,7 +2988,7 @@ TEST(ceil_w) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -3017,22 +3010,22 @@ TEST(ceil_l) {
int64_t c;
int64_t d;
}Test;
- const int tableLength = 15;
- double inputs_D[tableLength] = {
+ const int kTableLength = 15;
+ double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::infinity()
};
- float inputs_S[tableLength] = {
+ float inputs_S[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
2147483648.0,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()
};
- double outputs[tableLength] = {
+ double outputs[kTableLength] = {
3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483648.0, dFPU64InvalidResult,
@@ -3052,7 +3045,7 @@ TEST(ceil_l) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- for (int i = 0; i < tableLength; i++) {
+ for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
(CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
@@ -3517,6 +3510,8 @@ TEST(class_fmt) {
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
// Expected double results.
+ CHECK_EQ(bit_cast<int64_t>(t.dSignalingNan), 0x001);
+ CHECK_EQ(bit_cast<int64_t>(t.dQuietNan), 0x002);
CHECK_EQ(bit_cast<int64_t>(t.dNegInf), 0x004);
CHECK_EQ(bit_cast<int64_t>(t.dNegNorm), 0x008);
CHECK_EQ(bit_cast<int64_t>(t.dNegSubnorm), 0x010);
@@ -3527,6 +3522,8 @@ TEST(class_fmt) {
CHECK_EQ(bit_cast<int64_t>(t.dPosZero), 0x200);
// Expected float results.
+ CHECK_EQ(bit_cast<int32_t>(t.fSignalingNan), 0x001);
+ CHECK_EQ(bit_cast<int32_t>(t.fQuietNan), 0x002);
CHECK_EQ(bit_cast<int32_t>(t.fNegInf), 0x004);
CHECK_EQ(bit_cast<int32_t>(t.fNegNorm), 0x008);
CHECK_EQ(bit_cast<int32_t>(t.fNegSubnorm), 0x010);
@@ -5366,4 +5363,104 @@ TEST(r6_balc) {
}
+uint64_t run_dsll(uint64_t rt_value, uint16_t sa_value) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ __ dsll(v0, a0, sa_value);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+
+ uint64_t res =
+ reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, rt_value, 0, 0, 0, 0));
+
+ return res;
+}
+
+
+TEST(dsll) {
+ CcTest::InitializeVM();
+
+ struct TestCaseDsll {
+ uint64_t rt_value;
+ uint16_t sa_value;
+ uint64_t expected_res;
+ };
+
+ struct TestCaseDsll tc[] = {
+ // rt_value, sa_value, expected_res
+ { 0xffffffffffffffff, 0, 0xffffffffffffffff },
+ { 0xffffffffffffffff, 16, 0xffffffffffff0000 },
+ { 0xffffffffffffffff, 31, 0xffffffff80000000 },
+ };
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseDsll);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ CHECK_EQ(tc[i].expected_res,
+ run_dsll(tc[i].rt_value, tc[i].sa_value));
+ }
+}
+
+
+uint64_t run_bal(int16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0);
+
+ __ mov(t0, ra);
+ __ bal(offset); // Equivalent for "BGEZAL zero_reg, offset".
+ __ nop();
+
+ __ mov(ra, t0);
+ __ jr(ra);
+ __ nop();
+
+ __ li(v0, 1);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+
+ uint64_t res =
+ reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+
+ return res;
+}
+
+
+TEST(bal) {
+ CcTest::InitializeVM();
+
+ struct TestCaseBal {
+ int16_t offset;
+ uint64_t expected_res;
+ };
+
+ struct TestCaseBal tc[] = {
+ // offset, expected_res
+ { 4, 1 },
+ };
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBal);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ CHECK_EQ(tc[i].expected_res, run_bal(tc[i].offset));
+ }
+}
+
+
#undef __
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 3b25480a4a..e35b430555 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -376,7 +376,7 @@ TEST(OptimizedCodeSharing1) {
FLAG_cache_optimized_code = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- for (int i = 0; i < 10; i++) {
+ for (int i = 0; i < 3; i++) {
LocalContext env;
env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "x"),
v8::Integer::New(CcTest::isolate(), i));
@@ -432,7 +432,65 @@ TEST(OptimizedCodeSharing2) {
CHECK(fun0->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
reference_code = handle(fun0->code());
}
- for (int i = 0; i < 10; i++) {
+ for (int i = 0; i < 3; i++) {
+ LocalContext env;
+ env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "x"),
+ v8::Integer::New(CcTest::isolate(), i));
+ script->GetUnboundScript()->BindToCurrentContext()->Run();
+ CompileRun(
+ "var closure0 = MakeClosure();"
+ "%DebugPrint(closure0());"
+ "%OptimizeFunctionOnNextCall(closure0);"
+ "%DebugPrint(closure0());"
+ "var closure1 = MakeClosure();"
+ "var closure2 = MakeClosure();");
+ Handle<JSFunction> fun1 = v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str("closure1"))));
+ Handle<JSFunction> fun2 = v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str("closure2"))));
+ CHECK(fun1->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
+ CHECK(fun2->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
+ CHECK_EQ(*reference_code, fun1->code());
+ CHECK_EQ(*reference_code, fun2->code());
+ }
+}
+
+
+// Test that optimized code for different closures is actually shared
+// immediately by the FastNewClosureStub without context-dependent entries.
+TEST(OptimizedCodeSharing3) {
+ if (FLAG_stress_compaction) return;
+ FLAG_allow_natives_syntax = true;
+ FLAG_cache_optimized_code = true;
+ FLAG_turbo_cache_shared_code = true;
+ const char* flag = "--turbo-filter=*";
+ FlagList::SetFlagsFromString(flag, StrLength(flag));
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Script> script = v8_compile(
+ "function MakeClosure() {"
+ " return function() { return x; };"
+ "}");
+ Handle<Code> reference_code;
+ {
+ LocalContext env;
+ env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "x"),
+ v8::Integer::New(CcTest::isolate(), 23));
+ script->GetUnboundScript()->BindToCurrentContext()->Run();
+ CompileRun(
+ "var closure0 = MakeClosure();"
+ "%DebugPrint(closure0());"
+ "%OptimizeFunctionOnNextCall(closure0);"
+ "%DebugPrint(closure0());");
+ Handle<JSFunction> fun0 = v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str("closure0"))));
+ CHECK(fun0->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
+ reference_code = handle(fun0->code());
+ // Evict only the context-dependent entry from the optimized code map. This
+ // leaves it in a state where only the context-independent entry exists.
+ fun0->shared()->TrimOptimizedCodeMap(SharedFunctionInfo::kEntryLength);
+ }
+ for (int i = 0; i < 3; i++) {
LocalContext env;
env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "x"),
v8::Integer::New(CcTest::isolate(), i));
@@ -566,6 +624,32 @@ TEST(CompileFunctionInContextNonIdentifierArgs) {
}
+TEST(CompileFunctionInContextScriptOrigin) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext env;
+ v8::ScriptOrigin origin(v8_str("test"),
+ v8::Integer::New(CcTest::isolate(), 22),
+ v8::Integer::New(CcTest::isolate(), 41));
+ v8::ScriptCompiler::Source script_source(v8_str("throw new Error()"), origin);
+ v8::Local<v8::Function> fun = v8::ScriptCompiler::CompileFunctionInContext(
+ CcTest::isolate(), &script_source, env.local(), 0, NULL, 0, NULL);
+ CHECK(!fun.IsEmpty());
+ v8::TryCatch try_catch;
+ CcTest::isolate()->SetCaptureStackTraceForUncaughtExceptions(true);
+ fun->Call(env->Global(), 0, NULL);
+ CHECK(try_catch.HasCaught());
+ CHECK(!try_catch.Exception().IsEmpty());
+ v8::Local<v8::StackTrace> stack =
+ v8::Exception::GetStackTrace(try_catch.Exception());
+ CHECK(!stack.IsEmpty());
+ CHECK(stack->GetFrameCount() > 0);
+ v8::Local<v8::StackFrame> frame = stack->GetFrame(0);
+ CHECK_EQ(23, frame->GetLineNumber());
+ CHECK_EQ(42 + strlen("throw "), static_cast<unsigned>(frame->GetColumn()));
+}
+
+
#ifdef ENABLE_DISASSEMBLER
static Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj,
const char* property_name) {
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index e24f6f9050..17eec07376 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -31,9 +31,9 @@
#include "include/v8-profiler.h"
#include "src/base/platform/platform.h"
+#include "src/base/smart-pointers.h"
#include "src/cpu-profiler-inl.h"
#include "src/deoptimizer.h"
-#include "src/smart-pointers.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
#include "test/cctest/profiler-extension.h"
@@ -46,8 +46,8 @@ using i::ProfileGenerator;
using i::ProfileNode;
using i::ProfilerEventsProcessor;
using i::ScopedVector;
-using i::SmartPointer;
using i::Vector;
+using v8::base::SmartPointer;
// Helper methods
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index a6ffdca179..8f569ae6fe 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -33,7 +33,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frames.h"
#include "src/utils.h"
@@ -52,7 +52,6 @@ using ::v8::internal::Heap;
using ::v8::internal::JSGlobalProxy;
using ::v8::internal::Code;
using ::v8::internal::Debug;
-using ::v8::internal::Debugger;
using ::v8::internal::CommandMessage;
using ::v8::internal::CommandMessageQueue;
using ::v8::internal::StackFrame;
@@ -155,7 +154,7 @@ static v8::Local<v8::Function> CompileFunction(v8::Isolate* isolate,
static bool HasDebugInfo(v8::Handle<v8::Function> fun) {
Handle<v8::internal::JSFunction> f = v8::Utils::OpenHandle(*fun);
Handle<v8::internal::SharedFunctionInfo> shared(f->shared());
- return Debug::HasDebugInfo(shared);
+ return shared->HasDebugInfo();
}
@@ -412,13 +411,10 @@ void CheckDebuggerUnloaded(bool check_functions) {
if (check_functions) {
if (obj->IsJSFunction()) {
JSFunction* fun = JSFunction::cast(obj);
- for (RelocIterator it(fun->shared()->code()); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (RelocInfo::IsCodeTarget(rmode)) {
- CHECK(!Debug::IsDebugBreak(it.rinfo()->target_address()));
- } else if (RelocInfo::IsJSReturn(rmode)) {
- CHECK(!it.rinfo()->IsPatchedReturnSequence());
- }
+ for (RelocIterator it(fun->shared()->code(),
+ RelocInfo::kDebugBreakSlotMask);
+ !it.done(); it.next()) {
+ CHECK(!it.rinfo()->IsPatchedDebugBreakSlotSequence());
}
}
}
@@ -439,61 +435,6 @@ static void CheckDebuggerUnloaded(bool check_functions = false) {
}
-// Compile a function, set a break point and check that the call at the break
-// location in the code is the expected debug_break function.
-void CheckDebugBreakFunction(DebugLocalContext* env,
- const char* source, const char* name,
- int position, v8::internal::RelocInfo::Mode mode,
- Code* debug_break) {
- EnableDebugger();
- i::Debug* debug = CcTest::i_isolate()->debug();
-
- // Create function and set the break point.
- Handle<i::JSFunction> fun =
- v8::Utils::OpenHandle(*CompileFunction(env, source, name));
- int bp = SetBreakPoint(fun, position);
-
- // Check that the debug break function is as expected.
- Handle<i::SharedFunctionInfo> shared(fun->shared());
- CHECK(Debug::HasDebugInfo(shared));
- i::BreakLocation location = i::BreakLocation::FromPosition(
- Debug::GetDebugInfo(shared), i::SOURCE_BREAK_LOCATIONS, position,
- i::STATEMENT_ALIGNED);
- i::RelocInfo::Mode actual_mode = location.rmode();
- if (actual_mode == i::RelocInfo::CODE_TARGET_WITH_ID) {
- actual_mode = i::RelocInfo::CODE_TARGET;
- }
- CHECK_EQ(mode, actual_mode);
- if (mode != i::RelocInfo::JS_RETURN) {
- CHECK_EQ(debug_break, *location.CodeTarget());
- } else {
- i::RelocInfo rinfo = location.rinfo();
- CHECK(i::RelocInfo::IsJSReturn(rinfo.rmode()));
- CHECK(rinfo.IsPatchedReturnSequence());
- }
-
- // Clear the break point and check that the debug break function is no longer
- // there
- ClearBreakPoint(bp);
- CHECK(!debug->HasDebugInfo(shared));
- CHECK(debug->EnsureDebugInfo(shared, fun));
- location = i::BreakLocation::FromPosition(Debug::GetDebugInfo(shared),
- i::SOURCE_BREAK_LOCATIONS, position,
- i::STATEMENT_ALIGNED);
- actual_mode = location.rmode();
- if (actual_mode == i::RelocInfo::CODE_TARGET_WITH_ID) {
- actual_mode = i::RelocInfo::CODE_TARGET;
- }
- CHECK_EQ(mode, actual_mode);
- if (mode == i::RelocInfo::JS_RETURN) {
- i::RelocInfo rinfo = location.rinfo();
- CHECK(!rinfo.IsPatchedReturnSequence());
- }
-
- DisableDebugger();
-}
-
-
// --- D e b u g E v e n t H a n d l e r s
// ---
// --- The different tests uses a number of debug event handlers.
@@ -911,7 +852,6 @@ bool terminate_after_max_break_point_hit = false;
static void DebugEventBreakMax(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
- v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
v8::Isolate* v8_isolate = CcTest::isolate();
v8::internal::Isolate* isolate = CcTest::i_isolate();
v8::internal::Debug* debug = isolate->debug();
@@ -923,17 +863,6 @@ static void DebugEventBreakMax(
// Count the number of breaks.
break_point_hit_count++;
- // Collect the JavsScript stack height if the function frame_count is
- // compiled.
- if (!frame_count.IsEmpty()) {
- static const int kArgc = 1;
- v8::Handle<v8::Value> argv[kArgc] = { exec_state };
- // Using exec_state as receiver is just to have a receiver.
- v8::Handle<v8::Value> result =
- frame_count->Call(exec_state, kArgc, argv);
- last_js_stack_height = result->Int32Value();
- }
-
// Set the break flag again to come back here as soon as possible.
v8::Debug::DebugBreak(v8_isolate);
@@ -7145,15 +7074,22 @@ TEST(DebugBreakStackInspection) {
static void TestDebugBreakInLoop(const char* loop_head,
const char** loop_bodies,
const char* loop_tail) {
- // Receive 100 breaks for each test and then terminate JavaScript execution.
- static const int kBreaksPerTest = 100;
+ // Receive 10 breaks for each test and then terminate JavaScript execution.
+ static const int kBreaksPerTest = 10;
for (int i = 0; loop_bodies[i] != NULL; i++) {
// Perform a lazy deoptimization after various numbers of breaks
// have been hit.
- for (int j = 0; j < 7; j++) {
+
+ EmbeddedVector<char, 1024> buffer;
+ SNPrintF(buffer, "function f() {%s%s%s}", loop_head, loop_bodies[i],
+ loop_tail);
+
+ i::PrintF("%s\n", buffer.start());
+
+ for (int j = 0; j < 3; j++) {
break_point_hit_count_deoptimize = j;
- if (j == 6) {
+ if (j == 2) {
break_point_hit_count_deoptimize = kBreaksPerTest;
}
@@ -7161,11 +7097,6 @@ static void TestDebugBreakInLoop(const char* loop_head,
max_break_point_hit_count = kBreaksPerTest;
terminate_after_max_break_point_hit = true;
- EmbeddedVector<char, 1024> buffer;
- SNPrintF(buffer,
- "function f() {%s%s%s}",
- loop_head, loop_bodies[i], loop_tail);
-
// Function with infinite loop.
CompileRun(buffer.start());
@@ -7182,43 +7113,38 @@ static void TestDebugBreakInLoop(const char* loop_head,
}
-TEST(DebugBreakLoop) {
+static const char* loop_bodies_1[] = {"",
+ "g()",
+ "if (a == 0) { g() }",
+ "if (a == 1) { g() }",
+ "if (a == 0) { g() } else { h() }",
+ "if (a == 0) { continue }",
+ NULL};
+
+
+static const char* loop_bodies_2[] = {
+ "if (a == 1) { continue }",
+ "switch (a) { case 1: g(); }",
+ "switch (a) { case 1: continue; }",
+ "switch (a) { case 1: g(); break; default: h() }",
+ "switch (a) { case 1: continue; break; default: h() }",
+ NULL};
+
+
+void DebugBreakLoop(const char* loop_header, const char** loop_bodies,
+ const char* loop_footer) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
// Register a debug event listener which sets the break flag and counts.
v8::Debug::SetDebugEventListener(DebugEventBreakMax);
- // Create a function for getting the frame count when hitting the break.
- frame_count = CompileFunction(&env, frame_count_source, "frame_count");
-
- CompileRun("var a = 1;");
- CompileRun("function g() { }");
- CompileRun("function h() { }");
-
- const char* loop_bodies[] = {
- "",
- "g()",
- "if (a == 0) { g() }",
- "if (a == 1) { g() }",
- "if (a == 0) { g() } else { h() }",
- "if (a == 0) { continue }",
- "if (a == 1) { continue }",
- "switch (a) { case 1: g(); }",
- "switch (a) { case 1: continue; }",
- "switch (a) { case 1: g(); break; default: h() }",
- "switch (a) { case 1: continue; break; default: h() }",
- NULL
- };
-
- TestDebugBreakInLoop("while (true) {", loop_bodies, "}");
- TestDebugBreakInLoop("while (a == 1) {", loop_bodies, "}");
-
- TestDebugBreakInLoop("do {", loop_bodies, "} while (true)");
- TestDebugBreakInLoop("do {", loop_bodies, "} while (a == 1)");
+ CompileRun(
+ "var a = 1;\n"
+ "function g() { }\n"
+ "function h() { }");
- TestDebugBreakInLoop("for (;;) {", loop_bodies, "}");
- TestDebugBreakInLoop("for (;a == 1;) {", loop_bodies, "}");
+ TestDebugBreakInLoop(loop_header, loop_bodies, loop_footer);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(NULL);
@@ -7226,6 +7152,62 @@ TEST(DebugBreakLoop) {
}
+TEST(DebugBreakInWhileTrue1) {
+ DebugBreakLoop("while (true) {", loop_bodies_1, "}");
+}
+
+
+TEST(DebugBreakInWhileTrue2) {
+ DebugBreakLoop("while (true) {", loop_bodies_2, "}");
+}
+
+
+TEST(DebugBreakInWhileCondition1) {
+ DebugBreakLoop("while (a == 1) {", loop_bodies_1, "}");
+}
+
+
+TEST(DebugBreakInWhileCondition2) {
+ DebugBreakLoop("while (a == 1) {", loop_bodies_2, "}");
+}
+
+
+TEST(DebugBreakInDoWhileTrue1) {
+ DebugBreakLoop("do {", loop_bodies_1, "} while (true)");
+}
+
+
+TEST(DebugBreakInDoWhileTrue2) {
+ DebugBreakLoop("do {", loop_bodies_2, "} while (true)");
+}
+
+
+TEST(DebugBreakInDoWhileCondition1) {
+ DebugBreakLoop("do {", loop_bodies_1, "} while (a == 1)");
+}
+
+
+TEST(DebugBreakInDoWhileCondition2) {
+ DebugBreakLoop("do {", loop_bodies_2, "} while (a == 1)");
+}
+
+
+TEST(DebugBreakInFor1) { DebugBreakLoop("for (;;) {", loop_bodies_1, "}"); }
+
+
+TEST(DebugBreakInFor2) { DebugBreakLoop("for (;;) {", loop_bodies_2, "}"); }
+
+
+TEST(DebugBreakInForCondition1) {
+ DebugBreakLoop("for (;a == 1;) {", loop_bodies_1, "}");
+}
+
+
+TEST(DebugBreakInForCondition2) {
+ DebugBreakLoop("for (;a == 1;) {", loop_bodies_2, "}");
+}
+
+
v8::Local<v8::Script> inline_script;
static void DebugBreakInlineListener(
@@ -7331,7 +7313,12 @@ TEST(Regress131642) {
// Import from test-heap.cc
+namespace v8 {
+namespace internal {
+
int CountNativeContexts();
+}
+}
static void NopListener(const v8::Debug::EventDetails& event_details) {
@@ -7341,15 +7328,15 @@ static void NopListener(const v8::Debug::EventDetails& event_details) {
TEST(DebuggerCreatesContextIffActive) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
- CHECK_EQ(1, CountNativeContexts());
+ CHECK_EQ(1, v8::internal::CountNativeContexts());
v8::Debug::SetDebugEventListener(NULL);
CompileRun("debugger;");
- CHECK_EQ(1, CountNativeContexts());
+ CHECK_EQ(1, v8::internal::CountNativeContexts());
v8::Debug::SetDebugEventListener(NopListener);
CompileRun("debugger;");
- CHECK_EQ(2, CountNativeContexts());
+ CHECK_EQ(2, v8::internal::CountNativeContexts());
v8::Debug::SetDebugEventListener(NULL);
}
@@ -7637,3 +7624,27 @@ TEST(DebugBreakInLexicalScopes) {
"x * y",
30);
}
+
+static int after_compile_handler_depth = 0;
+static void HandleInterrupt(v8::Isolate* isolate, void* data) {
+ CHECK_EQ(0, after_compile_handler_depth);
+}
+
+static void NoInterruptsOnDebugEvent(
+ const v8::Debug::EventDetails& event_details) {
+ if (event_details.GetEvent() != v8::AfterCompile) return;
+ ++after_compile_handler_depth;
+ // Do not allow nested AfterCompile events.
+ CHECK(after_compile_handler_depth <= 1);
+ v8::Isolate* isolate = event_details.GetEventContext()->GetIsolate();
+ isolate->RequestInterrupt(&HandleInterrupt, nullptr);
+ CompileRun("function foo() {}; foo();");
+ --after_compile_handler_depth;
+}
+
+
+TEST(NoInterruptsInDebugListener) {
+ DebugLocalContext env;
+ v8::Debug::SetDebugEventListener(NoInterruptsOnDebugEvent);
+ CompileRun("void(0);");
+}
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index 1d512e0a75..20c1913923 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -32,7 +32,7 @@
#include "src/api.h"
#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/isolate.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index 03f3c27d51..dc8a8b5bed 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -29,7 +29,7 @@
#include "test/cctest/cctest.h"
#include "src/api.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/global-handles.h"
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index cc5e89f1fd..b3b8a0358e 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -30,7 +30,7 @@
#include "src/v8.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index b3b50acf9d..643a3c1bdb 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -1249,25 +1249,6 @@ TEST_(load_store_pair) {
}
-TEST_(load_store_pair_nontemp) {
- SET_UP();
-
- COMPARE(ldnp(w0, w1, MemOperand(x2)), "ldnp w0, w1, [x2]");
- COMPARE(stnp(w3, w4, MemOperand(x5, 252)), "stnp w3, w4, [x5, #252]");
- COMPARE(ldnp(w6, w7, MemOperand(x8, -256)), "ldnp w6, w7, [x8, #-256]");
- COMPARE(stnp(x9, x10, MemOperand(x11)), "stnp x9, x10, [x11]");
- COMPARE(ldnp(x12, x13, MemOperand(x14, 504)), "ldnp x12, x13, [x14, #504]");
- COMPARE(stnp(x15, x16, MemOperand(x17, -512)), "stnp x15, x16, [x17, #-512]");
- COMPARE(ldnp(s18, s19, MemOperand(x20)), "ldnp s18, s19, [x20]");
- COMPARE(stnp(s21, s22, MemOperand(x23, 252)), "stnp s21, s22, [x23, #252]");
- COMPARE(ldnp(s24, s25, MemOperand(x26, -256)), "ldnp s24, s25, [x26, #-256]");
- COMPARE(stnp(d27, d28, MemOperand(fp)), "stnp d27, d28, [fp]");
- COMPARE(ldnp(d30, d31, MemOperand(x0, 504)), "ldnp d30, d31, [x0, #504]");
- COMPARE(stnp(d1, d2, MemOperand(x3, -512)), "stnp d1, d2, [x3, #-512]");
-
- CLEANUP();
-}
-
#if 0 // TODO(all): enable.
TEST_(load_literal) {
SET_UP();
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index aeaa99538b..669e37ac69 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -29,9 +29,10 @@
#include "src/v8.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
+#include "src/ia32/frames-ia32.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
@@ -287,7 +288,7 @@ TEST(DisasmIa320) {
__ bind(&L2);
__ call(Operand(ebx, ecx, times_4, 10000));
__ nop();
- Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_CONTEXTUAL));
+ Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_INSIDE_TYPEOF));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
__ call(FUNCTION_ADDR(DummyStaticFunction), RelocInfo::RUNTIME_ENTRY);
diff --git a/deps/v8/test/cctest/test-disasm-mips.cc b/deps/v8/test/cctest/test-disasm-mips.cc
index c04cd23bf5..6895ebf5d4 100644
--- a/deps/v8/test/cctest/test-disasm-mips.cc
+++ b/deps/v8/test/cctest/test-disasm-mips.cc
@@ -30,7 +30,7 @@
#include "src/v8.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
@@ -98,7 +98,7 @@ if (failure) { \
byte *progcounter = &buffer[pc_offset]; \
char str_with_address[100]; \
snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
- compare_string, progcounter + 4 + (offset << 2)); \
+ compare_string, progcounter + 4 + (offset * 4)); \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
@@ -110,7 +110,7 @@ if (failure) { \
byte *progcounter = &buffer[pc_offset]; \
char str_with_address[100]; \
snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
- compare_string, progcounter + (offset << 2)); \
+ compare_string, progcounter + (offset * 4)); \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
@@ -121,16 +121,25 @@ if (failure) { \
int pc_offset = assm.pc_offset(); \
byte *progcounter = &buffer[pc_offset]; \
char str_with_address[100]; \
- int instr_index = target >> 2; \
- snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
- compare_string, reinterpret_cast<byte *>( \
- ((uint32_t)(progcounter + 1) & ~0xfffffff) | \
+ int instr_index = (target >> 2) & kImm26Mask; \
+ snprintf( \
+ str_with_address, sizeof(str_with_address), "%s %p -> %p", \
+ compare_string, reinterpret_cast<byte *>(target), \
+ reinterpret_cast<byte *>(((uint32_t)(progcounter + 4) & ~0xfffffff) | \
(instr_index << 2))); \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
+#define GET_PC_REGION(pc_region) \
+ { \
+ int pc_offset = assm.pc_offset(); \
+ byte *progcounter = &buffer[pc_offset]; \
+ pc_region = reinterpret_cast<int32_t>(progcounter + 4) & ~0xfffffff; \
+ }
+
+
TEST(Type0) {
SET_UP();
@@ -466,12 +475,18 @@ TEST(Type0) {
COMPARE_PC_REL_COMPACT(bgtz(a0, 32767), "1c807fff bgtz a0, 32767",
32767);
- COMPARE_PC_JUMP(j(0x4), "08000001 j 0x4", 0x4);
- COMPARE_PC_JUMP(j(0xffffffc), "0bffffff j 0xffffffc", 0xffffffc);
+ int32_t pc_region;
+ GET_PC_REGION(pc_region);
+
+ int32_t target = pc_region | 0x4;
+ COMPARE_PC_JUMP(j(target), "08000001 j ", target);
+ target = pc_region | 0xffffffc;
+ COMPARE_PC_JUMP(j(target), "0bffffff j ", target);
- COMPARE_PC_JUMP(jal(0x4), "0c000001 jal 0x4", 0x4);
- COMPARE_PC_JUMP(jal(0xffffffc), "0fffffff jal 0xffffffc",
- 0xffffffc);
+ target = pc_region | 0x4;
+ COMPARE_PC_JUMP(jal(target), "0c000001 jal ", target);
+ target = pc_region | 0xffffffc;
+ COMPARE_PC_JUMP(jal(target), "0fffffff jal ", target);
COMPARE(addiu(a0, a1, 0x0),
"24a40000 addiu a0, a1, 0");
diff --git a/deps/v8/test/cctest/test-disasm-mips64.cc b/deps/v8/test/cctest/test-disasm-mips64.cc
index 225a1e7f0b..7cf6397886 100644
--- a/deps/v8/test/cctest/test-disasm-mips64.cc
+++ b/deps/v8/test/cctest/test-disasm-mips64.cc
@@ -30,7 +30,7 @@
#include "src/v8.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
@@ -98,7 +98,7 @@ if (failure) { \
byte *progcounter = &buffer[pc_offset]; \
char str_with_address[100]; \
snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
- compare_string, progcounter + 4 + (offset << 2)); \
+ compare_string, progcounter + 4 + (offset * 4)); \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
@@ -110,7 +110,7 @@ if (failure) { \
byte *progcounter = &buffer[pc_offset]; \
char str_with_address[100]; \
snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
- compare_string, progcounter + (offset << 2)); \
+ compare_string, progcounter + (offset * 4)); \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
@@ -121,16 +121,25 @@ if (failure) { \
int pc_offset = assm.pc_offset(); \
byte *progcounter = &buffer[pc_offset]; \
char str_with_address[100]; \
- int instr_index = target >> 2; \
- snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
- compare_string, reinterpret_cast<byte *>( \
- ((uint64_t)(progcounter + 1) & ~0xfffffff) | \
+ int instr_index = (target >> 2) & kImm26Mask; \
+ snprintf( \
+ str_with_address, sizeof(str_with_address), "%s %p -> %p", \
+ compare_string, reinterpret_cast<byte *>(target), \
+ reinterpret_cast<byte *>(((uint64_t)(progcounter + 1) & ~0xfffffff) | \
(instr_index << 2))); \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
+#define GET_PC_REGION(pc_region) \
+ { \
+ int pc_offset = assm.pc_offset(); \
+ byte *progcounter = &buffer[pc_offset]; \
+ pc_region = reinterpret_cast<int64_t>(progcounter + 4) & ~0xfffffff; \
+ }
+
+
TEST(Type0) {
SET_UP();
@@ -1114,12 +1123,18 @@ TEST(Type3) {
COMPARE_PC_REL_COMPACT(bgtz(a0, 32767), "1c807fff bgtz a0, 32767",
32767);
- COMPARE_PC_JUMP(j(0x4), "08000001 j 0x4", 0x4);
- COMPARE_PC_JUMP(j(0xffffffc), "0bffffff j 0xffffffc", 0xffffffc);
+ int64_t pc_region;
+ GET_PC_REGION(pc_region);
+
+ int64_t target = pc_region | 0x4;
+ COMPARE_PC_JUMP(j(target), "08000001 j ", target);
+ target = pc_region | 0xffffffc;
+ COMPARE_PC_JUMP(j(target), "0bffffff j ", target);
- COMPARE_PC_JUMP(jal(0x4), "0c000001 jal 0x4", 0x4);
- COMPARE_PC_JUMP(jal(0xffffffc), "0fffffff jal 0xffffffc",
- 0xffffffc);
+ target = pc_region | 0x4;
+ COMPARE_PC_JUMP(jal(target), "0c000001 jal ", target);
+ target = pc_region | 0xffffffc;
+ COMPARE_PC_JUMP(jal(target), "0fffffff jal ", target);
VERIFY_RUN();
}
diff --git a/deps/v8/test/cctest/test-disasm-ppc.cc b/deps/v8/test/cctest/test-disasm-ppc.cc
index ed409f2f9d..e4b434ffb1 100644
--- a/deps/v8/test/cctest/test-disasm-ppc.cc
+++ b/deps/v8/test/cctest/test-disasm-ppc.cc
@@ -30,7 +30,7 @@
#include "src/v8.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
@@ -107,8 +107,8 @@ TEST(DisasmPPC) {
COMPARE(and_(r6, r0, r6, SetRC), "7c063039 and. r6, r0, r6");
// skipping branches (for now?)
COMPARE(bctr(), "4e800420 bctr");
+ COMPARE(bctrl(), "4e800421 bctrl");
COMPARE(blr(), "4e800020 blr");
- COMPARE(bclr(BA, SetLK), "4e800021 blrl");
// skipping call - only used in simulator
#if V8_TARGET_ARCH_PPC64
COMPARE(cmpi(r0, Operand(5)), "2fa00005 cmpi r0, 5");
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index bcfe507b25..980f5d5b0f 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -29,7 +29,7 @@
#include "src/v8.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/ic/ic.h"
@@ -282,7 +282,7 @@ TEST(DisasmX64) {
// TODO(mstarzinger): The following is protected.
// __ call(Operand(rbx, rcx, times_4, 10000));
__ nop();
- Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_CONTEXTUAL));
+ Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_INSIDE_TYPEOF));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
__ nop();
diff --git a/deps/v8/test/cctest/test-disasm-x87.cc b/deps/v8/test/cctest/test-disasm-x87.cc
index a3433b290b..17609cfc3c 100644
--- a/deps/v8/test/cctest/test-disasm-x87.cc
+++ b/deps/v8/test/cctest/test-disasm-x87.cc
@@ -29,11 +29,12 @@
#include "src/v8.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
+#include "src/x87/frames-x87.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -287,7 +288,7 @@ TEST(DisasmIa320) {
__ bind(&L2);
__ call(Operand(ebx, ecx, times_4, 10000));
__ nop();
- Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_CONTEXTUAL));
+ Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_INSIDE_TYPEOF));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
__ call(FUNCTION_ADDR(DummyStaticFunction), RelocInfo::RUNTIME_ENTRY);
diff --git a/deps/v8/test/cctest/test-extra.js b/deps/v8/test/cctest/test-extra.js
index 829ddee01a..f943ea6c4e 100644
--- a/deps/v8/test/cctest/test-extra.js
+++ b/deps/v8/test/cctest/test-extra.js
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function (global, exports) {
+(function (global, binding) {
'use strict';
- exports.testExtraShouldReturnFive = function () {
+ binding.testExtraShouldReturnFive = function () {
return 5;
};
- exports.testExtraShouldCallToRuntime = function() {
- return exports.runtime(3);
+ binding.testExtraShouldCallToRuntime = function() {
+ return binding.runtime(3);
};
})
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index cf8a730fb7..b982c0f02b 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -6,7 +6,7 @@
#include "test/cctest/cctest.h"
#include "src/api.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/global-handles.h"
@@ -40,7 +40,8 @@ TEST(VectorStructure) {
CHECK_EQ(1, vector->Slots());
CHECK_EQ(0, vector->ICSlots());
- FeedbackVectorSpec one_icslot(0, Code::CALL_IC);
+ ZoneFeedbackVectorSpec one_icslot(zone, 0, 1);
+ one_icslot.SetKind(0, Code::CALL_IC);
vector = factory->NewTypeFeedbackVector(&one_icslot);
CHECK_EQ(0, vector->Slots());
CHECK_EQ(1, vector->ICSlots());
@@ -385,4 +386,101 @@ TEST(VectorLoadICOnSmi) {
nexus.FindAllMaps(&maps2);
CHECK_EQ(2, maps2.length());
}
+
+
+static Handle<JSFunction> GetFunction(const char* name) {
+ Handle<JSFunction> f = v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(CcTest::global()->Get(v8_str(name))));
+ return f;
+}
+
+
+TEST(ReferenceContextAllocatesNoSlots) {
+ if (i::FLAG_always_opt) return;
+ CcTest::InitializeVM();
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Isolate* isolate = CcTest::i_isolate();
+
+ CompileRun(
+ "function testvar(x) {"
+ " y = x;"
+ " y = a;"
+ " return y;"
+ "}"
+ "a = 3;"
+ "testvar({});");
+
+ Handle<JSFunction> f = GetFunction("testvar");
+
+ // There should be two LOAD_ICs, one for a and one for y at the end.
+ Handle<TypeFeedbackVector> feedback_vector =
+ handle(f->shared()->feedback_vector(), isolate);
+ CHECK_EQ(2, feedback_vector->ICSlots());
+ CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
+ CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
+
+ CompileRun(
+ "function testprop(x) {"
+ " x.blue = a;"
+ "}"
+ "testprop({ blue: 3 });");
+
+ f = GetFunction("testprop");
+
+ // There should be one LOAD_IC, for the load of a.
+ feedback_vector = handle(f->shared()->feedback_vector(), isolate);
+ CHECK_EQ(1, feedback_vector->ICSlots());
+
+ CompileRun(
+ "function testpropfunc(x) {"
+ " x().blue = a;"
+ " return x().blue;"
+ "}"
+ "function makeresult() { return { blue: 3 }; }"
+ "testpropfunc(makeresult);");
+
+ f = GetFunction("testpropfunc");
+
+ // There should be 2 LOAD_ICs and 2 CALL_ICs.
+ feedback_vector = handle(f->shared()->feedback_vector(), isolate);
+ CHECK_EQ(4, feedback_vector->ICSlots());
+ CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::CALL_IC);
+ CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
+ CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(2)) == Code::CALL_IC);
+ CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(3)) == Code::LOAD_IC);
+
+ CompileRun(
+ "function testkeyedprop(x) {"
+ " x[0] = a;"
+ " return x[0];"
+ "}"
+ "testkeyedprop([0, 1, 2]);");
+
+ f = GetFunction("testkeyedprop");
+
+ // There should be 1 LOAD_ICs for the load of a, and one KEYED_LOAD_IC for the
+ // load of x[0] in the return statement.
+ feedback_vector = handle(f->shared()->feedback_vector(), isolate);
+ CHECK_EQ(2, feedback_vector->ICSlots());
+ CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
+ CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) ==
+ Code::KEYED_LOAD_IC);
+
+ CompileRun(
+ "function testcompound(x) {"
+ " x.old = x.young = x.in_between = a;"
+ " return x.old + x.young;"
+ "}"
+ "testcompound({ old: 3, young: 3, in_between: 3 });");
+
+ f = GetFunction("testcompound");
+
+ // There should be 3 LOAD_ICs, for load of a and load of x.old and x.young.
+ feedback_vector = handle(f->shared()->feedback_vector(), isolate);
+ CHECK_EQ(3, feedback_vector->ICSlots());
+ CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
+ CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
+ CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(2)) == Code::LOAD_IC);
+}
}
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index ae8e77d745..6c7aa030bc 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -29,11 +29,12 @@
#include "src/v8.h"
#include "src/api.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/string-search.h"
#include "test/cctest/cctest.h"
+using ::v8::base::SmartArrayPointer;
using ::v8::internal::CStrVector;
using ::v8::internal::Factory;
using ::v8::internal::Handle;
@@ -43,7 +44,6 @@ using ::v8::internal::JSFunction;
using ::v8::internal::Object;
using ::v8::internal::Runtime;
using ::v8::internal::Script;
-using ::v8::internal::SmartArrayPointer;
using ::v8::internal::SharedFunctionInfo;
using ::v8::internal::String;
using ::v8::internal::Vector;
@@ -80,7 +80,6 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
CHECK_NE(0, func_pos);
// Obtain SharedFunctionInfo for the function.
- isolate->debug()->PrepareForBreakPoints();
Handle<SharedFunctionInfo> shared_func_info =
Handle<SharedFunctionInfo>::cast(
isolate->debug()->FindSharedFunctionInfoInScript(i_script, func_pos));
@@ -88,6 +87,8 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
// Verify inferred function name.
SmartArrayPointer<char> inferred_name =
shared_func_info->inferred_name()->ToCString();
+ i::PrintF("expected: %s, found: %s\n", ref_inferred_name,
+ inferred_name.get());
CHECK_EQ(0, strcmp(ref_inferred_name, inferred_name.get()));
}
@@ -223,6 +224,44 @@ TEST(ObjectLiteral) {
}
+TEST(UpperCaseClass) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ v8::Handle<v8::Script> script = Compile(CcTest::isolate(),
+ "'use strict';\n"
+ "class MyClass {\n"
+ " constructor() {\n"
+ " this.value = 1;\n"
+ " }\n"
+ " method() {\n"
+ " this.value = 2;\n"
+ " }\n"
+ "}");
+ CheckFunctionName(script, "this.value = 1", "MyClass");
+ CheckFunctionName(script, "this.value = 2", "MyClass.method");
+}
+
+
+TEST(LowerCaseClass) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ v8::Handle<v8::Script> script = Compile(CcTest::isolate(),
+ "'use strict';\n"
+ "class myclass {\n"
+ " constructor() {\n"
+ " this.value = 1;\n"
+ " }\n"
+ " method() {\n"
+ " this.value = 2;\n"
+ " }\n"
+ "}");
+ CheckFunctionName(script, "this.value = 1", "myclass");
+ CheckFunctionName(script, "this.value = 2", "myclass.method");
+}
+
+
TEST(AsParameter) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index ee295d6991..20aaeac73e 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -380,3 +380,20 @@ TEST(EternalHandles) {
CHECK_EQ(2*kArrayLength + 1, eternal_handles->NumberOfHandles());
}
+
+
+TEST(PersistentBaseGetLocal) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> o = v8::Object::New(isolate);
+ CHECK(!o.IsEmpty());
+ v8::Persistent<v8::Object> p(isolate, o);
+ CHECK(o == p.Get(isolate));
+ CHECK(v8::Local<v8::Object>::New(isolate, p) == p.Get(isolate));
+
+ v8::Global<v8::Object> g(isolate, o);
+ CHECK(o == g.Get(isolate));
+ CHECK(v8::Local<v8::Object>::New(isolate, g) == g.Get(isolate));
+}
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 4416f38973..f4c8c1a486 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -33,9 +33,10 @@
#include "include/v8-profiler.h"
#include "src/allocation-tracker.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/hashmap.h"
#include "src/heap-profiler.h"
+#include "src/heap-snapshot-generator-inl.h"
#include "test/cctest/cctest.h"
using i::AllocationTraceNode;
@@ -95,10 +96,11 @@ class NamedEntriesDetector {
static const v8::HeapGraphNode* GetGlobalObject(
const v8::HeapSnapshot* snapshot) {
- CHECK_EQ(2, snapshot->GetRoot()->GetChildrenCount());
- // The 0th-child is (GC Roots), 1st is the user root.
+ CHECK_EQ(3, snapshot->GetRoot()->GetChildrenCount());
+ // The 0th-child is (GC Roots), 1st is code stubs context, 2nd is the user
+ // root.
const v8::HeapGraphNode* global_obj =
- snapshot->GetRoot()->GetChild(1)->GetToNode();
+ snapshot->GetRoot()->GetChild(2)->GetToNode();
CHECK_EQ(0, strncmp("Object", const_cast<i::HeapEntry*>(
reinterpret_cast<const i::HeapEntry*>(global_obj))->name(), 6));
return global_obj;
@@ -481,6 +483,34 @@ TEST(HeapSnapshotSymbol) {
}
+void CheckSimdSnapshot(const char* program, const char* var_name) {
+ i::FLAG_harmony_simd = true;
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+ CompileRun(program);
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* var =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, var_name);
+ CHECK(var);
+ CHECK_EQ(var->GetType(), v8::HeapGraphNode::kSimdValue);
+}
+
+
+TEST(HeapSnapshotSimd) {
+ CheckSimdSnapshot("a = SIMD.Float32x4();\n", "a");
+ CheckSimdSnapshot("a = SIMD.Int32x4();\n", "a");
+ CheckSimdSnapshot("a = SIMD.Bool32x4();\n", "a");
+ CheckSimdSnapshot("a = SIMD.Int16x8();\n", "a");
+ CheckSimdSnapshot("a = SIMD.Bool16x8();\n", "a");
+ CheckSimdSnapshot("a = SIMD.Int8x16();\n", "a");
+ CheckSimdSnapshot("a = SIMD.Bool8x16();\n", "a");
+}
+
+
TEST(HeapSnapshotWeakCollection) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -960,7 +990,7 @@ TEST(HeapSnapshotJSONSerialization) {
v8::Local<v8::String> ref_string =
CompileRun(STRING_LITERAL_FOR_TEST)->ToString(isolate);
#undef STRING_LITERAL_FOR_TEST
- CHECK_EQ(0, strcmp(*v8::String::Utf8Value(ref_string),
+ CHECK_LT(0, strcmp(*v8::String::Utf8Value(ref_string),
*v8::String::Utf8Value(string)));
}
@@ -1768,7 +1798,7 @@ TEST(GetHeapValueForDeletedObject) {
static int StringCmp(const char* ref, i::String* act) {
- i::SmartArrayPointer<char> s_act = act->ToCString();
+ v8::base::SmartArrayPointer<char> s_act = act->ToCString();
int result = strcmp(ref, s_act.get());
if (result != 0)
fprintf(stderr, "Expected: \"%s\", Actual: \"%s\"\n", ref, s_act.get());
@@ -2082,6 +2112,7 @@ TEST(NoDebugObjectInSnapshot) {
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* root = snapshot->GetRoot();
int globals_count = 0;
+ bool found = false;
for (int i = 0; i < root->GetChildrenCount(); ++i) {
const v8::HeapGraphEdge* edge = root->GetChild(i);
if (edge->GetType() == v8::HeapGraphEdge::kShortcut) {
@@ -2089,10 +2120,13 @@ TEST(NoDebugObjectInSnapshot) {
const v8::HeapGraphNode* global = edge->GetToNode();
const v8::HeapGraphNode* foo =
GetProperty(global, v8::HeapGraphEdge::kProperty, "foo");
- CHECK(foo);
+ if (foo != nullptr) {
+ found = true;
+ }
}
}
- CHECK_EQ(1, globals_count);
+ CHECK_EQ(2, globals_count);
+ CHECK(found);
}
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 848c768860..5d568e25c1 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -31,6 +31,7 @@
#include "src/v8.h"
#include "src/compilation-cache.h"
+#include "src/context-measure.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/factory.h"
@@ -40,9 +41,32 @@
#include "src/snapshot/snapshot.h"
#include "test/cctest/cctest.h"
-using namespace v8::internal;
using v8::Just;
+namespace v8 {
+namespace internal {
+
+// Tests that should have access to private methods of {v8::internal::Heap}.
+// Those tests need to be defined using HEAP_TEST(Name) { ... }.
+#define HEAP_TEST_METHODS(V) \
+ V(GCFlags)
+
+
+#define HEAP_TEST(Name) \
+ CcTest register_test_##Name(HeapTester::Test##Name, __FILE__, #Name, NULL, \
+ true, true); \
+ void HeapTester::Test##Name()
+
+
+class HeapTester {
+ public:
+#define DECLARE_STATIC(Name) static void Test##Name();
+
+ HEAP_TEST_METHODS(DECLARE_STATIC)
+#undef HEAP_TEST_METHODS
+};
+
+
static void CheckMap(Map* map, int type, int instance_size) {
CHECK(map->IsHeapObject());
#ifdef DEBUG
@@ -59,7 +83,10 @@ TEST(HeapMaps) {
Heap* heap = CcTest::heap();
CheckMap(heap->meta_map(), MAP_TYPE, Map::kSize);
CheckMap(heap->heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
- CheckMap(heap->float32x4_map(), FLOAT32X4_TYPE, Float32x4::kSize);
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ CheckMap(heap->type##_map(), SIMD128_VALUE_TYPE, Type::kSize);
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
CheckMap(heap->fixed_array_map(), FIXED_ARRAY_TYPE, kVariableSizeSentinel);
CheckMap(heap->string_map(), STRING_TYPE, kVariableSizeSentinel);
}
@@ -215,20 +242,24 @@ TEST(HeapObjects) {
template <typename T, typename LANE_TYPE, int LANES>
-static void CheckSimdLanes(T* value) {
- // Get the original values, and check that all lanes can be set to new values
- // without disturbing the other lanes.
- LANE_TYPE lane_values[LANES];
+static void CheckSimdValue(T* value, LANE_TYPE lane_values[LANES],
+ LANE_TYPE other_value) {
+ // Check against lane_values, and check that all lanes can be set to
+ // other_value without disturbing the other lanes.
for (int i = 0; i < LANES; i++) {
- lane_values[i] = value->get_lane(i);
+ CHECK_EQ(lane_values[i], value->get_lane(i));
}
for (int i = 0; i < LANES; i++) {
- lane_values[i] += 1;
- value->set_lane(i, lane_values[i]);
+ value->set_lane(i, other_value); // change the value
for (int j = 0; j < LANES; j++) {
- CHECK_EQ(lane_values[j], value->get_lane(j));
+ if (i != j)
+ CHECK_EQ(lane_values[j], value->get_lane(j));
+ else
+ CHECK_EQ(other_value, value->get_lane(j));
}
+ value->set_lane(i, lane_values[i]); // restore the lane
}
+ CHECK(value->BooleanValue()); // SIMD values are 'true'.
}
@@ -239,32 +270,132 @@ TEST(SimdObjects) {
HandleScope sc(isolate);
- Handle<Object> value = factory->NewFloat32x4(1, 2, 3, 4);
- CHECK(value->IsFloat32x4());
- CHECK(value->BooleanValue()); // SIMD values map to true.
+ // Float32x4
+ {
+ float lanes[4] = {1, 2, 3, 4};
+ float quiet_NaN = std::numeric_limits<float>::quiet_NaN();
+ float signaling_NaN = std::numeric_limits<float>::signaling_NaN();
+
+ Handle<Float32x4> value = factory->NewFloat32x4(lanes);
+ CHECK(value->IsFloat32x4());
+ CheckSimdValue<Float32x4, float, 4>(*value, lanes, 3.14f);
+
+ // Check special lane values.
+ value->set_lane(1, -0.0);
+ CHECK_EQ(-0.0, value->get_lane(1));
+ CHECK(std::signbit(value->get_lane(1))); // Sign bit should be preserved.
+ value->set_lane(2, quiet_NaN);
+ CHECK(std::isnan(value->get_lane(2)));
+ value->set_lane(3, signaling_NaN);
+ CHECK(std::isnan(value->get_lane(3)));
+
+#ifdef OBJECT_PRINT
+ // Check value printing.
+ {
+ value = factory->NewFloat32x4(lanes);
+ std::ostringstream os;
+ value->Float32x4Print(os);
+ CHECK_EQ("1, 2, 3, 4", os.str());
+ }
+ {
+ float special_lanes[4] = {0, -0.0, quiet_NaN, signaling_NaN};
+ value = factory->NewFloat32x4(special_lanes);
+ std::ostringstream os;
+ value->Float32x4Print(os);
+ // Value printing doesn't preserve signed zeroes.
+ CHECK_EQ("0, 0, NaN, NaN", os.str());
+ }
+#endif // OBJECT_PRINT
+ }
+ // Int32x4
+ {
+ int32_t lanes[4] = {-1, 0, 1, 2};
+
+ Handle<Int32x4> value = factory->NewInt32x4(lanes);
+ CHECK(value->IsInt32x4());
+ CheckSimdValue<Int32x4, int32_t, 4>(*value, lanes, 3);
+
+#ifdef OBJECT_PRINT
+ std::ostringstream os;
+ value->Int32x4Print(os);
+ CHECK_EQ("-1, 0, 1, 2", os.str());
+#endif // OBJECT_PRINT
+ }
+ // Bool32x4
+ {
+ bool lanes[4] = {true, true, true, false};
+
+ Handle<Bool32x4> value = factory->NewBool32x4(lanes);
+ CHECK(value->IsBool32x4());
+ CheckSimdValue<Bool32x4, bool, 4>(*value, lanes, false);
+
+#ifdef OBJECT_PRINT
+ std::ostringstream os;
+ value->Bool32x4Print(os);
+ CHECK_EQ("true, true, true, false", os.str());
+#endif // OBJECT_PRINT
+ }
+ // Int16x8
+ {
+ int16_t lanes[8] = {-1, 0, 1, 2, 3, 4, 5, -32768};
+
+ Handle<Int16x8> value = factory->NewInt16x8(lanes);
+ CHECK(value->IsInt16x8());
+ CheckSimdValue<Int16x8, int16_t, 8>(*value, lanes, 32767);
+
+#ifdef OBJECT_PRINT
+ std::ostringstream os;
+ value->Int16x8Print(os);
+ CHECK_EQ("-1, 0, 1, 2, 3, 4, 5, -32768", os.str());
+#endif // OBJECT_PRINT
+ }
+ // Bool16x8
+ {
+ bool lanes[8] = {true, true, true, true, true, true, true, false};
+
+ Handle<Bool16x8> value = factory->NewBool16x8(lanes);
+ CHECK(value->IsBool16x8());
+ CheckSimdValue<Bool16x8, bool, 8>(*value, lanes, false);
+
+#ifdef OBJECT_PRINT
+ std::ostringstream os;
+ value->Bool16x8Print(os);
+ CHECK_EQ("true, true, true, true, true, true, true, false", os.str());
+#endif // OBJECT_PRINT
+ }
+ // Int8x16
+ {
+ int8_t lanes[16] = {-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -128};
- Float32x4* float32x4 = *Handle<Float32x4>::cast(value);
- CheckSimdLanes<Float32x4, float, 4>(float32x4);
+ Handle<Int8x16> value = factory->NewInt8x16(lanes);
+ CHECK(value->IsInt8x16());
+ CheckSimdValue<Int8x16, int8_t, 16>(*value, lanes, 127);
+
+#ifdef OBJECT_PRINT
+ std::ostringstream os;
+ value->Int8x16Print(os);
+ CHECK_EQ("-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -128",
+ os.str());
+#endif // OBJECT_PRINT
+ }
+ // Bool8x16
+ {
+ bool lanes[16] = {true, true, true, true, true, true, true, false,
+ true, true, true, true, true, true, true, false};
- // Check ToString for SIMD values.
- // TODO(bbudge): Switch to Check* style function to test ToString().
- value = factory->NewFloat32x4(1, 2, 3, 4);
- float32x4 = *Handle<Float32x4>::cast(value);
- std::ostringstream os;
- float32x4->Float32x4Print(os);
- CHECK_EQ("1, 2, 3, 4", os.str());
+ Handle<Bool8x16> value = factory->NewBool8x16(lanes);
+ CHECK(value->IsBool8x16());
+ CheckSimdValue<Bool8x16, bool, 16>(*value, lanes, false);
- // Check unusual lane values.
- float32x4->set_lane(0, 0);
- CHECK_EQ(0, float32x4->get_lane(0));
- float32x4->set_lane(1, -0.0);
- CHECK_EQ(-0.0, float32x4->get_lane(1));
- float quiet_NaN = std::numeric_limits<float>::quiet_NaN();
- float signaling_NaN = std::numeric_limits<float>::signaling_NaN();
- float32x4->set_lane(2, quiet_NaN);
- CHECK(std::isnan(float32x4->get_lane(2)));
- float32x4->set_lane(3, signaling_NaN);
- CHECK(std::isnan(float32x4->get_lane(3)));
+#ifdef OBJECT_PRINT
+ std::ostringstream os;
+ value->Bool8x16Print(os);
+ CHECK_EQ(
+ "true, true, true, true, true, true, true, false, true, true, true, "
+ "true, true, true, true, false",
+ os.str());
+#endif // OBJECT_PRINT
+ }
}
@@ -556,6 +687,46 @@ TEST(DeleteWeakGlobalHandle) {
}
+TEST(BytecodeArray) {
+ static const uint8_t kRawBytes[] = {0xc3, 0x7e, 0xa5, 0x5a};
+ static const int kRawBytesSize = sizeof(kRawBytes);
+ static const int kFrameSize = 32;
+
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
+ // Allocate and initialize BytecodeArray
+ Handle<BytecodeArray> array =
+ factory->NewBytecodeArray(kRawBytesSize, kRawBytes, kFrameSize);
+
+ CHECK(array->IsBytecodeArray());
+ CHECK_EQ(array->length(), (int)sizeof(kRawBytes));
+ CHECK_EQ(array->frame_size(), kFrameSize);
+ CHECK_LE(array->address(), array->GetFirstBytecodeAddress());
+ CHECK_GE(array->address() + array->BytecodeArraySize(),
+ array->GetFirstBytecodeAddress() + array->length());
+ for (int i = 0; i < kRawBytesSize; i++) {
+ CHECK_EQ(array->GetFirstBytecodeAddress()[i], kRawBytes[i]);
+ CHECK_EQ(array->get(i), kRawBytes[i]);
+ }
+
+ // Full garbage collection
+ heap->CollectAllGarbage();
+
+ // BytecodeArray should survive
+ CHECK_EQ(array->length(), kRawBytesSize);
+ CHECK_EQ(array->frame_size(), kFrameSize);
+
+ for (int i = 0; i < kRawBytesSize; i++) {
+ CHECK_EQ(array->get(i), kRawBytes[i]);
+ CHECK_EQ(array->GetFirstBytecodeAddress()[i], kRawBytes[i]);
+ }
+}
+
+
static const char* not_so_random_string_table[] = {
"abstract",
"boolean",
@@ -791,7 +962,7 @@ TEST(JSArray) {
CHECK(array->HasFastSmiOrObjectElements());
// array[length] = name.
- JSReceiver::SetElement(array, 0, name, SLOPPY).Check();
+ JSReceiver::SetElement(isolate, array, 0, name, SLOPPY).Check();
CHECK_EQ(Smi::FromInt(1), array->length());
element = i::Object::GetElement(isolate, array, 0).ToHandleChecked();
CHECK_EQ(*element, *name);
@@ -805,7 +976,7 @@ TEST(JSArray) {
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
- JSReceiver::SetElement(array, int_length, name, SLOPPY).Check();
+ JSReceiver::SetElement(isolate, array, int_length, name, SLOPPY).Check();
uint32_t new_int_length = 0;
CHECK(array->length()->ToArrayIndex(&new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
@@ -836,8 +1007,8 @@ TEST(JSObjectCopy) {
JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
JSReceiver::SetProperty(obj, second, two, SLOPPY).Check();
- JSReceiver::SetElement(obj, 0, first, SLOPPY).Check();
- JSReceiver::SetElement(obj, 1, second, SLOPPY).Check();
+ JSReceiver::SetElement(isolate, obj, 0, first, SLOPPY).Check();
+ JSReceiver::SetElement(isolate, obj, 1, second, SLOPPY).Check();
// Make the clone.
Handle<Object> value1, value2;
@@ -862,8 +1033,8 @@ TEST(JSObjectCopy) {
JSReceiver::SetProperty(clone, first, two, SLOPPY).Check();
JSReceiver::SetProperty(clone, second, one, SLOPPY).Check();
- JSReceiver::SetElement(clone, 0, second, SLOPPY).Check();
- JSReceiver::SetElement(clone, 1, first, SLOPPY).Check();
+ JSReceiver::SetElement(isolate, clone, 0, second, SLOPPY).Check();
+ JSReceiver::SetElement(isolate, clone, 1, first, SLOPPY).Check();
value1 = Object::GetElement(isolate, obj, 1).ToHandleChecked();
value2 = Object::GetElement(isolate, clone, 0).ToHandleChecked();
@@ -973,22 +1144,6 @@ TEST(Iteration) {
}
-TEST(EmptyHandleEscapeFrom) {
- CcTest::InitializeVM();
-
- v8::HandleScope scope(CcTest::isolate());
- Handle<JSObject> runaway;
-
- {
- v8::EscapableHandleScope nested(CcTest::isolate());
- Handle<JSObject> empty;
- runaway = empty.EscapeFrom(&nested);
- }
-
- CHECK(runaway.is_null());
-}
-
-
static int LenFromSize(int size) {
return (size - FixedArray::kHeaderSize) / kPointerSize;
}
@@ -1012,7 +1167,7 @@ TEST(Regression39128) {
// Step 1: prepare a map for the object. We add 1 inobject property to it.
// Create a map with single inobject property.
Handle<Map> my_map = Map::Create(CcTest::i_isolate(), 1);
- int n_properties = my_map->inobject_properties();
+ int n_properties = my_map->GetInObjectProperties();
CHECK_GT(n_properties, 0);
int object_size = my_map->instance_size();
@@ -1523,7 +1678,8 @@ int CountNativeContexts() {
count++;
object = Context::cast(object)->get(Context::NEXT_CONTEXT_LINK);
}
- return count;
+ // Subtract one to compensate for the code stub context that is always present
+ return count - 1;
}
@@ -1661,7 +1817,8 @@ static int CountNativeContextsWithGC(Isolate* isolate, int n) {
Handle<Object>(Context::cast(*object)->get(Context::NEXT_CONTEXT_LINK),
isolate);
}
- return count;
+ // Subtract one to compensate for the code stub context that is always present
+ return count - 1;
}
@@ -2238,7 +2395,10 @@ static int NumberOfGlobalObjects() {
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsGlobalObject()) count++;
}
- return count;
+ // Subtract two to compensate for the two global objects (not global
+ // JSObjects, of which there would only be one) that are part of the code stub
+ // context, which is always present.
+ return count - 2;
}
@@ -2450,7 +2610,7 @@ TEST(InstanceOfStubWriteBarrier) {
}
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
- marking->Abort();
+ marking->Stop();
marking->Start(Heap::kNoGCFlags);
Handle<JSFunction> f =
@@ -2578,7 +2738,7 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
CHECK(f->IsOptimized());
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
- marking->Abort();
+ marking->Stop();
marking->Start(Heap::kNoGCFlags);
// The following calls will increment CcTest::heap()->global_ic_age().
CcTest::isolate()->ContextDisposedNotification();
@@ -2619,7 +2779,7 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
CcTest::global()->Get(v8_str("f"))));
CHECK(f->IsOptimized());
- CcTest::heap()->incremental_marking()->Abort();
+ CcTest::heap()->incremental_marking()->Stop();
// The following two calls will increment CcTest::heap()->global_ic_age().
CcTest::isolate()->ContextDisposedNotification();
@@ -2631,12 +2791,43 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
}
+HEAP_TEST(GCFlags) {
+ CcTest::InitializeVM();
+ Heap* heap = CcTest::heap();
+
+ heap->set_current_gc_flags(Heap::kNoGCFlags);
+ CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags());
+
+ // Set the flags to check whether we appropriately resets them after the GC.
+ heap->set_current_gc_flags(Heap::kAbortIncrementalMarkingMask);
+ heap->CollectAllGarbage(Heap::kReduceMemoryFootprintMask);
+ CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags());
+
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ collector->EnsureSweepingCompleted();
+ }
+
+ IncrementalMarking* marking = heap->incremental_marking();
+ marking->Stop();
+ marking->Start(Heap::kReduceMemoryFootprintMask);
+ CHECK_NE(0, heap->current_gc_flags() & Heap::kReduceMemoryFootprintMask);
+
+ heap->CollectGarbage(NEW_SPACE);
+ // NewSpace scavenges should not overwrite the flags.
+ CHECK_NE(0, heap->current_gc_flags() & Heap::kReduceMemoryFootprintMask);
+
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags());
+}
+
+
TEST(IdleNotificationFinishMarking) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
SimulateFullSpace(CcTest::heap()->old_space());
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
- marking->Abort();
+ marking->Stop();
marking->Start(Heap::kNoGCFlags);
CHECK_EQ(CcTest::heap()->gc_count(), 0);
@@ -3681,8 +3872,14 @@ static void CheckVectorIC(Handle<JSFunction> f, int ic_slot_index,
Handle<TypeFeedbackVector> vector =
Handle<TypeFeedbackVector>(f->shared()->feedback_vector());
FeedbackVectorICSlot slot(ic_slot_index);
- LoadICNexus nexus(vector, slot);
- CHECK(nexus.StateFromFeedback() == desired_state);
+ if (vector->GetKind(slot) == Code::LOAD_IC) {
+ LoadICNexus nexus(vector, slot);
+ CHECK(nexus.StateFromFeedback() == desired_state);
+ } else {
+ CHECK(vector->GetKind(slot) == Code::KEYED_LOAD_IC);
+ KeyedLoadICNexus nexus(vector, slot);
+ CHECK(nexus.StateFromFeedback() == desired_state);
+ }
}
@@ -3695,6 +3892,38 @@ static void CheckVectorICCleared(Handle<JSFunction> f, int ic_slot_index) {
}
+TEST(ICInBuiltInIsClearedAppropriately) {
+ if (i::FLAG_always_opt) return;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ Handle<JSFunction> apply;
+ {
+ LocalContext env;
+ v8::Local<v8::Value> res = CompileRun("Function.apply");
+ Handle<JSObject> maybe_apply =
+ v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
+ apply = Handle<JSFunction>::cast(maybe_apply);
+ TypeFeedbackVector* vector = apply->shared()->feedback_vector();
+ CHECK(vector->ICSlots() == 1);
+ CheckVectorIC(apply, 0, UNINITIALIZED);
+ CompileRun(
+ "function b(a1, a2, a3) { return a1 + a2 + a3; }"
+ "function fun(bar) { bar.apply({}, [1, 2, 3]); };"
+ "fun(b); fun(b)");
+ CheckVectorIC(apply, 0, MONOMORPHIC);
+ }
+
+ // Fire context dispose notification.
+ CcTest::isolate()->ContextDisposedNotification();
+ SimulateIncrementalMarking(CcTest::heap());
+ CcTest::heap()->CollectAllGarbage();
+
+ // The IC in apply has been cleared, ready to learn again.
+ CheckVectorIC(apply, 0, PREMONOMORPHIC);
+}
+
+
TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
@@ -4368,6 +4597,149 @@ TEST(Regress173458) {
}
+#ifdef DEBUG
+TEST(Regress513507) {
+ i::FLAG_flush_optimized_code_cache = false;
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_gc_global = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+
+ // Prepare function whose optimized code map we can use.
+ Handle<SharedFunctionInfo> shared;
+ {
+ HandleScope inner_scope(isolate);
+ CompileRun("function f() { return 1 }"
+ "f(); %OptimizeFunctionOnNextCall(f); f();");
+
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ CcTest::global()->Get(v8_str("f"))));
+ shared = inner_scope.CloseAndEscape(handle(f->shared(), isolate));
+ CompileRun("f = null");
+ }
+
+ // Prepare optimized code that we can use.
+ Handle<Code> code;
+ {
+ HandleScope inner_scope(isolate);
+ CompileRun("function g() { return 2 }"
+ "g(); %OptimizeFunctionOnNextCall(g); g();");
+
+ Handle<JSFunction> g =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ CcTest::global()->Get(v8_str("g"))));
+ code = inner_scope.CloseAndEscape(handle(g->code(), isolate));
+ if (!code->is_optimized_code()) return;
+ }
+
+ Handle<FixedArray> lit = isolate->factory()->empty_fixed_array();
+ Handle<Context> context(isolate->context());
+
+ // Add the new code several times to the optimized code map and also set an
+ // allocation timeout so that expanding the code map will trigger a GC.
+ heap->set_allocation_timeout(5);
+ FLAG_gc_interval = 1000;
+ for (int i = 0; i < 10; ++i) {
+ BailoutId id = BailoutId(i);
+ SharedFunctionInfo::AddToOptimizedCodeMap(shared, context, code, lit, id);
+ }
+}
+#endif // DEBUG
+
+
+TEST(Regress514122) {
+ i::FLAG_flush_optimized_code_cache = false;
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+
+ // Perfrom one initial GC to enable code flushing.
+ CcTest::heap()->CollectAllGarbage();
+
+ // Prepare function whose optimized code map we can use.
+ Handle<SharedFunctionInfo> shared;
+ {
+ HandleScope inner_scope(isolate);
+ CompileRun("function f() { return 1 }"
+ "f(); %OptimizeFunctionOnNextCall(f); f();");
+
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ CcTest::global()->Get(v8_str("f"))));
+ shared = inner_scope.CloseAndEscape(handle(f->shared(), isolate));
+ CompileRun("f = null");
+ }
+
+ // Prepare optimized code that we can use.
+ Handle<Code> code;
+ {
+ HandleScope inner_scope(isolate);
+ CompileRun("function g() { return 2 }"
+ "g(); %OptimizeFunctionOnNextCall(g); g();");
+
+ Handle<JSFunction> g =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ CcTest::global()->Get(v8_str("g"))));
+ code = inner_scope.CloseAndEscape(handle(g->code(), isolate));
+ if (!code->is_optimized_code()) return;
+ }
+
+ Handle<FixedArray> lit = isolate->factory()->empty_fixed_array();
+ Handle<Context> context(isolate->context());
+
+ // Add the code several times to the optimized code map.
+ for (int i = 0; i < 3; ++i) {
+ HandleScope inner_scope(isolate);
+ BailoutId id = BailoutId(i);
+ SharedFunctionInfo::AddToOptimizedCodeMap(shared, context, code, lit, id);
+ }
+ shared->optimized_code_map()->Print();
+
+ // Add the code with a literals array to be evacuated.
+ Page* evac_page;
+ {
+ HandleScope inner_scope(isolate);
+ AlwaysAllocateScope always_allocate(isolate);
+ // Make sure literal is placed on an old-space evacuation candidate.
+ SimulateFullSpace(heap->old_space());
+ Handle<FixedArray> lit = isolate->factory()->NewFixedArray(23, TENURED);
+ evac_page = Page::FromAddress(lit->address());
+ BailoutId id = BailoutId(100);
+ SharedFunctionInfo::AddToOptimizedCodeMap(shared, context, code, lit, id);
+ }
+
+ // Heap is ready, force {lit_page} to become an evacuation candidate and
+ // simulate incremental marking to enqueue optimized code map.
+ FLAG_manual_evacuation_candidates_selection = true;
+ evac_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ SimulateIncrementalMarking(heap);
+
+ // No matter whether reachable or not, {boomer} is doomed.
+ Handle<Object> boomer(shared->optimized_code_map(), isolate);
+
+ // Add the code several times to the optimized code map. This will leave old
+ // copies of the optimized code map unreachable but still marked.
+ for (int i = 3; i < 6; ++i) {
+ HandleScope inner_scope(isolate);
+ BailoutId id = BailoutId(i);
+ SharedFunctionInfo::AddToOptimizedCodeMap(shared, context, code, lit, id);
+ }
+
+ // Trigger a GC to flush out the bug.
+ heap->CollectGarbage(i::OLD_SPACE, "fire in the hole");
+ boomer->Print();
+}
+
+
class DummyVisitor : public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) { }
@@ -5242,6 +5614,31 @@ TEST(Regress357137) {
}
+TEST(Regress507979) {
+ const int kFixedArrayLen = 10;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ HandleScope handle_scope(isolate);
+
+ Handle<FixedArray> o1 = isolate->factory()->NewFixedArray(kFixedArrayLen);
+ Handle<FixedArray> o2 = isolate->factory()->NewFixedArray(kFixedArrayLen);
+ CHECK(heap->InNewSpace(o1->address()));
+ CHECK(heap->InNewSpace(o2->address()));
+
+ HeapIterator it(heap, i::HeapIterator::kFilterUnreachable);
+
+ // Replace parts of an object placed before a live object with a filler. This
+ // way the filler object shares the mark bits with the following live object.
+ o1->Shrink(kFixedArrayLen - 1);
+
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ // Let's not optimize the loop away.
+ CHECK(obj->address() != nullptr);
+ }
+}
+
+
TEST(ArrayShiftSweeping) {
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
@@ -5387,7 +5784,7 @@ TEST(Regress388880) {
CHECK_EQ(Page::kObjectStartOffset, page->Offset(temp2->address()));
}
- Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED, false);
+ Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED);
o->set_properties(*factory->empty_fixed_array());
// Ensure that the object allocated where we need it.
@@ -5399,7 +5796,7 @@ TEST(Regress388880) {
// Enable incremental marking to trigger actions in Heap::AdjustLiveBytes()
// that would cause crash.
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
- marking->Abort();
+ marking->Stop();
marking->Start(Heap::kNoGCFlags);
CHECK(marking->IsMarking());
@@ -6027,31 +6424,60 @@ TEST(SlotsBufferObjectSlotsRemoval) {
// Firstly, let's test the regular slots buffer entry.
buffer->Add(HeapObject::RawField(*array, FixedArray::kHeaderSize));
- DCHECK(reinterpret_cast<void*>(buffer->Get(0)) ==
- HeapObject::RawField(*array, FixedArray::kHeaderSize));
+ CHECK(reinterpret_cast<void*>(buffer->Get(0)) ==
+ HeapObject::RawField(*array, FixedArray::kHeaderSize));
SlotsBuffer::RemoveObjectSlots(CcTest::i_isolate()->heap(), buffer,
array->address(),
array->address() + array->Size());
- DCHECK(reinterpret_cast<void*>(buffer->Get(0)) ==
- HeapObject::RawField(heap->empty_fixed_array(),
- FixedArrayBase::kLengthOffset));
+ CHECK(reinterpret_cast<void*>(buffer->Get(0)) ==
+ HeapObject::RawField(heap->empty_fixed_array(),
+ FixedArrayBase::kLengthOffset));
// Secondly, let's test the typed slots buffer entry.
SlotsBuffer::AddTo(NULL, &buffer, SlotsBuffer::EMBEDDED_OBJECT_SLOT,
array->address() + FixedArray::kHeaderSize,
SlotsBuffer::FAIL_ON_OVERFLOW);
- DCHECK(reinterpret_cast<void*>(buffer->Get(1)) ==
- reinterpret_cast<Object**>(SlotsBuffer::EMBEDDED_OBJECT_SLOT));
- DCHECK(reinterpret_cast<void*>(buffer->Get(2)) ==
- HeapObject::RawField(*array, FixedArray::kHeaderSize));
+ CHECK(reinterpret_cast<void*>(buffer->Get(1)) ==
+ reinterpret_cast<Object**>(SlotsBuffer::EMBEDDED_OBJECT_SLOT));
+ CHECK(reinterpret_cast<void*>(buffer->Get(2)) ==
+ HeapObject::RawField(*array, FixedArray::kHeaderSize));
SlotsBuffer::RemoveObjectSlots(CcTest::i_isolate()->heap(), buffer,
array->address(),
array->address() + array->Size());
- DCHECK(reinterpret_cast<void*>(buffer->Get(1)) ==
- HeapObject::RawField(heap->empty_fixed_array(),
- FixedArrayBase::kLengthOffset));
- DCHECK(reinterpret_cast<void*>(buffer->Get(2)) ==
- HeapObject::RawField(heap->empty_fixed_array(),
- FixedArrayBase::kLengthOffset));
+ CHECK(reinterpret_cast<void*>(buffer->Get(1)) ==
+ HeapObject::RawField(heap->empty_fixed_array(),
+ FixedArrayBase::kLengthOffset));
+ CHECK(reinterpret_cast<void*>(buffer->Get(2)) ==
+ HeapObject::RawField(heap->empty_fixed_array(),
+ FixedArrayBase::kLengthOffset));
delete buffer;
}
+
+
+TEST(ContextMeasure) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ LocalContext context;
+
+ int size_upper_limit = 0;
+ int count_upper_limit = 0;
+ HeapIterator it(CcTest::heap());
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ size_upper_limit += obj->Size();
+ count_upper_limit++;
+ }
+
+ ContextMeasure measure(*isolate->native_context());
+
+ PrintF("Context size : %d bytes\n", measure.Size());
+ PrintF("Context object count: %d\n", measure.Count());
+
+ CHECK_LE(1000, measure.Count());
+ CHECK_LE(50000, measure.Size());
+
+ CHECK_LE(measure.Count(), count_upper_limit);
+ CHECK_LE(measure.Size(), size_upper_limit);
+}
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index 8419dc5a43..fdda3f53c6 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -29,7 +29,7 @@
#include "src/v8.h"
-#include "src/liveedit.h"
+#include "src/debug/liveedit.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index c86a9d67dc..f1dc5a28b4 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -31,11 +31,11 @@
#include "src/api.h"
#include "src/base/platform/platform.h"
+#include "src/base/smart-pointers.h"
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/isolate.h"
#include "src/parser.h"
-#include "src/smart-pointers.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
@@ -101,7 +101,7 @@ TEST(KangarooIsolates) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
- i::SmartPointer<KangarooThread> thread1;
+ v8::base::SmartPointer<KangarooThread> thread1;
{
v8::Locker locker(isolate);
v8::Isolate::Scope isolate_scope(isolate);
@@ -464,7 +464,8 @@ class LockAndUnlockDifferentIsolatesThread : public JoinableThread {
}
virtual void Run() {
- i::SmartPointer<LockIsolateAndCalculateFibSharedContextThread> thread;
+ v8::base::SmartPointer<LockIsolateAndCalculateFibSharedContextThread>
+ thread;
v8::Locker lock1(isolate1_);
CHECK(v8::Locker::IsLocked(isolate1_));
CHECK(!v8::Locker::IsLocked(isolate2_));
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 4ff8cba68b..4cc52a11e2 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -736,28 +736,27 @@ static void SmiAddTest(MacroAssembler* masm,
__ movl(rcx, Immediate(first));
__ Integer32ToSmi(rcx, rcx);
- i::SmiOperationExecutionMode mode;
- mode.Add(i::PRESERVE_SOURCE_REGISTER);
- mode.Add(i::BAILOUT_ON_OVERFLOW);
+ i::SmiOperationConstraints constraints =
+ i::SmiOperationConstraint::kPreserveSourceRegister |
+ i::SmiOperationConstraint::kBailoutOnOverflow;
__ incq(rax);
- __ SmiAddConstant(r9, rcx, Smi::FromInt(second), mode, exit);
+ __ SmiAddConstant(r9, rcx, Smi::FromInt(second), constraints, exit);
__ cmpq(r9, r8);
__ j(not_equal, exit);
__ incq(rax);
- __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), mode, exit);
+ __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), constraints, exit);
__ cmpq(rcx, r8);
__ j(not_equal, exit);
__ movl(rcx, Immediate(first));
__ Integer32ToSmi(rcx, rcx);
- mode.RemoveAll();
- mode.Add(i::PRESERVE_SOURCE_REGISTER);
- mode.Add(i::BAILOUT_ON_NO_OVERFLOW);
+ constraints = i::SmiOperationConstraint::kPreserveSourceRegister |
+ i::SmiOperationConstraint::kBailoutOnNoOverflow;
Label done;
__ incq(rax);
- __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), mode, &done);
+ __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), constraints, &done);
__ jmp(exit);
__ bind(&done);
__ cmpq(rcx, r8);
@@ -799,14 +798,14 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
- i::SmiOperationExecutionMode mode;
- mode.Add(i::PRESERVE_SOURCE_REGISTER);
- mode.Add(i::BAILOUT_ON_OVERFLOW);
+ i::SmiOperationConstraints constraints =
+ i::SmiOperationConstraint::kPreserveSourceRegister |
+ i::SmiOperationConstraint::kBailoutOnOverflow;
__ movq(rcx, r11);
{
Label overflow_ok;
__ incq(rax);
- __ SmiAddConstant(r9, rcx, Smi::FromInt(y_min), mode, &overflow_ok);
+ __ SmiAddConstant(r9, rcx, Smi::FromInt(y_min), constraints, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
@@ -817,7 +816,7 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
{
Label overflow_ok;
__ incq(rax);
- __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_min), mode, &overflow_ok);
+ __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_min), constraints, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
@@ -853,7 +852,7 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
{
Label overflow_ok;
__ incq(rax);
- __ SmiAddConstant(r9, rcx, Smi::FromInt(y_max), mode, &overflow_ok);
+ __ SmiAddConstant(r9, rcx, Smi::FromInt(y_max), constraints, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
@@ -861,12 +860,11 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
- mode.RemoveAll();
- mode.Add(i::BAILOUT_ON_OVERFLOW);
+ constraints = i::SmiOperationConstraint::kBailoutOnOverflow;
{
Label overflow_ok;
__ incq(rax);
- __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_max), mode, &overflow_ok);
+ __ SmiAddConstant(rcx, rcx, Smi::FromInt(y_max), constraints, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
@@ -952,28 +950,27 @@ static void SmiSubTest(MacroAssembler* masm,
__ cmpq(rcx, r8);
__ j(not_equal, exit);
- i::SmiOperationExecutionMode mode;
- mode.Add(i::PRESERVE_SOURCE_REGISTER);
- mode.Add(i::BAILOUT_ON_OVERFLOW);
+ i::SmiOperationConstraints constraints =
+ i::SmiOperationConstraint::kPreserveSourceRegister |
+ i::SmiOperationConstraint::kBailoutOnOverflow;
__ Move(rcx, Smi::FromInt(first));
__ incq(rax); // Test 4.
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), mode, exit);
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), constraints, exit);
__ cmpq(rcx, r8);
__ j(not_equal, exit);
__ Move(rcx, Smi::FromInt(first));
__ incq(rax); // Test 5.
- __ SmiSubConstant(r9, rcx, Smi::FromInt(second), mode, exit);
+ __ SmiSubConstant(r9, rcx, Smi::FromInt(second), constraints, exit);
__ cmpq(r9, r8);
__ j(not_equal, exit);
- mode.RemoveAll();
- mode.Add(i::PRESERVE_SOURCE_REGISTER);
- mode.Add(i::BAILOUT_ON_NO_OVERFLOW);
+ constraints = i::SmiOperationConstraint::kPreserveSourceRegister |
+ i::SmiOperationConstraint::kBailoutOnNoOverflow;
__ Move(rcx, Smi::FromInt(first));
Label done;
__ incq(rax); // Test 6.
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), mode, &done);
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), constraints, &done);
__ jmp(exit);
__ bind(&done);
__ cmpq(rcx, r8);
@@ -1015,15 +1012,15 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
- i::SmiOperationExecutionMode mode;
- mode.Add(i::PRESERVE_SOURCE_REGISTER);
- mode.Add(i::BAILOUT_ON_OVERFLOW);
+ i::SmiOperationConstraints constraints =
+ i::SmiOperationConstraint::kPreserveSourceRegister |
+ i::SmiOperationConstraint::kBailoutOnOverflow;
__ movq(rcx, r11);
{
Label overflow_ok;
__ incq(rax);
- __ SmiSubConstant(r9, rcx, Smi::FromInt(y_min), mode, &overflow_ok);
+ __ SmiSubConstant(r9, rcx, Smi::FromInt(y_min), constraints, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
@@ -1034,7 +1031,7 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
{
Label overflow_ok;
__ incq(rax);
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_min), mode, &overflow_ok);
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_min), constraints, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
@@ -1070,7 +1067,7 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
{
Label overflow_ok;
__ incq(rax);
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), mode, &overflow_ok);
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), constraints, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
@@ -1078,13 +1075,12 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
__ j(not_equal, exit);
}
- mode.RemoveAll();
- mode.Add(i::BAILOUT_ON_OVERFLOW);
+ constraints = i::SmiOperationConstraint::kBailoutOnOverflow;
__ movq(rcx, r11);
{
Label overflow_ok;
__ incq(rax);
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), mode, &overflow_ok);
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), constraints, &overflow_ok);
__ jmp(exit);
__ bind(&overflow_ok);
__ incq(rax);
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index a8cc6c7855..73369d29e5 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -39,7 +39,7 @@
#include "src/v8.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
#include "test/cctest/cctest.h"
@@ -59,7 +59,7 @@ TEST(MarkingDeque) {
Address original_address = reinterpret_cast<Address>(&s);
Address current_address = original_address;
while (!s.IsFull()) {
- s.PushBlack(HeapObject::FromAddress(current_address));
+ s.Push(HeapObject::FromAddress(current_address));
current_address += kPointerSize;
}
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
index 9aa1e6d30e..a97666384b 100644
--- a/deps/v8/test/cctest/test-mementos.cc
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -101,6 +101,7 @@ TEST(PretenuringCallNew) {
CcTest::InitializeVM();
if (!i::FLAG_allocation_site_pretenuring) return;
if (!i::FLAG_pretenuring_call_new) return;
+ if (i::FLAG_always_opt) return;
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
diff --git a/deps/v8/test/cctest/test-migrations.cc b/deps/v8/test/cctest/test-migrations.cc
index 0cefd54ceb..3ace0488f8 100644
--- a/deps/v8/test/cctest/test-migrations.cc
+++ b/deps/v8/test/cctest/test-migrations.cc
@@ -14,7 +14,6 @@
#include "src/global-handles.h"
#include "src/ic/stub-cache.h"
#include "src/macro-assembler.h"
-#include "src/smart-pointers.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
index 0295de5e23..07cc772df0 100644
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ b/deps/v8/test/cctest/test-object-observe.cc
@@ -711,7 +711,7 @@ TEST(DontLeakContextOnObserve) {
}
CcTest::isolate()->ContextDisposedNotification();
- CheckSurvivingGlobalObjectsCount(1);
+ CheckSurvivingGlobalObjectsCount(2);
}
@@ -732,7 +732,7 @@ TEST(DontLeakContextOnGetNotifier) {
}
CcTest::isolate()->ContextDisposedNotification();
- CheckSurvivingGlobalObjectsCount(1);
+ CheckSurvivingGlobalObjectsCount(2);
}
@@ -759,7 +759,7 @@ TEST(DontLeakContextOnNotifierPerformChange) {
}
CcTest::isolate()->ContextDisposedNotification();
- CheckSurvivingGlobalObjectsCount(1);
+ CheckSurvivingGlobalObjectsCount(2);
}
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index cfb43911aa..23a3d2621a 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -71,8 +71,6 @@ TEST(ScanKeywords) {
{
i::Utf8ToUtf16CharacterStream stream(keyword, length);
i::Scanner scanner(&unicode_cache);
- // The scanner should parse Harmony keywords for this test.
- scanner.SetHarmonyModules(true);
scanner.Initialize(&stream);
CHECK_EQ(key_token.token, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -506,7 +504,8 @@ TEST(PreParseOverflow) {
i::GetCurrentStackPosition() - 128 * 1024);
size_t kProgramSize = 1024 * 1024;
- i::SmartArrayPointer<char> program(i::NewArray<char>(kProgramSize + 1));
+ v8::base::SmartArrayPointer<char> program(
+ i::NewArray<char>(kProgramSize + 1));
memset(program.get(), '(', kProgramSize);
program[kProgramSize] = '\0';
@@ -560,7 +559,7 @@ void TestCharacterStream(const char* one_byte_source, unsigned length,
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
i::HandleScope test_scope(isolate);
- i::SmartArrayPointer<i::uc16> uc16_buffer(new i::uc16[length]);
+ v8::base::SmartArrayPointer<i::uc16> uc16_buffer(new i::uc16[length]);
for (unsigned i = 0; i < length; i++) {
uc16_buffer[i] = static_cast<i::uc16>(one_byte_source[i]);
}
@@ -1071,9 +1070,9 @@ TEST(ScopeUsesArgumentsSuperThis) {
CHECK(parser.Parse(&info));
CHECK(i::Rewriter::Rewrite(&info));
CHECK(i::Scope::Analyze(&info));
- CHECK(info.function() != NULL);
+ CHECK(info.literal() != NULL);
- i::Scope* script_scope = info.function()->scope();
+ i::Scope* script_scope = info.literal()->scope();
CHECK(script_scope->is_script_scope());
CHECK_EQ(1, script_scope->inner_scopes()->length());
@@ -1370,10 +1369,10 @@ TEST(ScopePositions) {
info.set_global();
info.set_language_mode(source_data[i].language_mode);
parser.Parse(&info);
- CHECK(info.function() != NULL);
+ CHECK(info.literal() != NULL);
// Check scope types and positions.
- i::Scope* scope = info.function()->scope();
+ i::Scope* scope = info.literal()->scope();
CHECK(scope->is_script_scope());
CHECK_EQ(scope->start_position(), 0);
CHECK_EQ(scope->end_position(), kProgramSize);
@@ -1426,12 +1425,10 @@ i::Handle<i::String> FormatMessage(i::Vector<unsigned> data) {
enum ParserFlag {
kAllowLazy,
kAllowNatives,
- kAllowHarmonyModules,
kAllowHarmonyArrowFunctions,
kAllowHarmonyRestParameters,
kAllowHarmonySloppy,
- kAllowHarmonyUnicode,
- kAllowHarmonyComputedPropertyNames,
+ kAllowHarmonySloppyLet,
kAllowHarmonySpreadCalls,
kAllowHarmonyDestructuring,
kAllowHarmonySpreadArrays,
@@ -1452,17 +1449,14 @@ void SetParserFlags(i::ParserBase<Traits>* parser,
i::EnumSet<ParserFlag> flags) {
parser->set_allow_lazy(flags.Contains(kAllowLazy));
parser->set_allow_natives(flags.Contains(kAllowNatives));
- parser->set_allow_harmony_modules(flags.Contains(kAllowHarmonyModules));
parser->set_allow_harmony_arrow_functions(
flags.Contains(kAllowHarmonyArrowFunctions));
- parser->set_allow_harmony_rest_params(
+ parser->set_allow_harmony_rest_parameters(
flags.Contains(kAllowHarmonyRestParameters));
parser->set_allow_harmony_spreadcalls(
flags.Contains(kAllowHarmonySpreadCalls));
parser->set_allow_harmony_sloppy(flags.Contains(kAllowHarmonySloppy));
- parser->set_allow_harmony_unicode(flags.Contains(kAllowHarmonyUnicode));
- parser->set_allow_harmony_computed_property_names(
- flags.Contains(kAllowHarmonyComputedPropertyNames));
+ parser->set_allow_harmony_sloppy_let(flags.Contains(kAllowHarmonySloppyLet));
parser->set_allow_harmony_destructuring(
flags.Contains(kAllowHarmonyDestructuring));
parser->set_allow_harmony_spread_arrays(
@@ -1512,7 +1506,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
SetParserFlags(&parser, flags);
info.set_global();
parser.Parse(&info);
- function = info.function();
+ function = info.literal();
if (function) {
parser_materialized_literals = function->materialized_literal_count();
}
@@ -3450,9 +3444,9 @@ TEST(InnerAssignment) {
i::Parser parser(&info);
CHECK(parser.Parse(&info));
CHECK(i::Compiler::Analyze(&info));
- CHECK(info.function() != NULL);
+ CHECK(info.literal() != NULL);
- i::Scope* scope = info.function()->scope();
+ i::Scope* scope = info.literal()->scope();
CHECK_EQ(scope->inner_scopes()->length(), 1);
i::Scope* inner_scope = scope->inner_scopes()->at(0);
const i::AstRawString* var_name =
@@ -4923,9 +4917,7 @@ TEST(InvalidUnicodeEscapes) {
"var foob\\v{1234}r = 0;",
"var foob\\U{1234}r = 0;",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyUnicode};
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
}
@@ -4951,9 +4943,7 @@ TEST(UnicodeEscapes) {
// Max value for the unicode escape
"\"\\u{10ffff}\"",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyUnicode};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess);
}
@@ -5332,7 +5322,6 @@ TEST(ComputedPropertyName) {
NULL};
static const ParserFlag always_flags[] = {
- kAllowHarmonyComputedPropertyNames,
kAllowHarmonySloppy,
};
RunParserSyncTest(context_data, error_data, kError, NULL, 0,
@@ -5361,7 +5350,6 @@ TEST(ComputedPropertyNameShorthandError) {
NULL};
static const ParserFlag always_flags[] = {
- kAllowHarmonyComputedPropertyNames,
kAllowHarmonySloppy,
};
RunParserSyncTest(context_data, error_data, kError, NULL, 0,
@@ -5370,6 +5358,8 @@ TEST(ComputedPropertyNameShorthandError) {
TEST(BasicImportExportParsing) {
+ i::FLAG_harmony_modules = true;
+
const char* kSources[] = {
"export let x = 0;",
"export var y = 0;",
@@ -5429,7 +5419,6 @@ TEST(BasicImportExportParsing) {
i::Zone zone;
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
- parser.set_allow_harmony_modules(true);
info.set_module();
if (!parser.Parse(&info)) {
i::Handle<i::JSObject> exception_handle(
@@ -5455,7 +5444,6 @@ TEST(BasicImportExportParsing) {
i::Zone zone;
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
- parser.set_allow_harmony_modules(true);
info.set_global();
CHECK(!parser.Parse(&info));
}
@@ -5464,6 +5452,8 @@ TEST(BasicImportExportParsing) {
TEST(ImportExportParsingErrors) {
+ i::FLAG_harmony_modules = true;
+
const char* kErrorSources[] = {
"export {",
"var a; export { a",
@@ -5544,7 +5534,6 @@ TEST(ImportExportParsingErrors) {
i::Zone zone;
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
- parser.set_allow_harmony_modules(true);
info.set_module();
CHECK(!parser.Parse(&info));
}
@@ -5575,11 +5564,10 @@ TEST(ModuleParsingInternals) {
i::Zone zone;
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
- parser.set_allow_harmony_modules(true);
info.set_module();
CHECK(parser.Parse(&info));
CHECK(i::Compiler::Analyze(&info));
- i::FunctionLiteral* func = info.function();
+ i::FunctionLiteral* func = info.literal();
i::Scope* module_scope = func->scope();
i::Scope* outer_scope = module_scope->outer_scope();
CHECK(outer_scope->is_script_scope());
@@ -5587,7 +5575,7 @@ TEST(ModuleParsingInternals) {
CHECK_EQ(1, outer_scope->num_modules());
CHECK(module_scope->is_module_scope());
CHECK_NOT_NULL(module_scope->module_var());
- CHECK_EQ(i::INTERNAL, module_scope->module_var()->mode());
+ CHECK_EQ(i::TEMPORARY, module_scope->module_var()->mode());
i::ModuleDescriptor* descriptor = module_scope->module();
CHECK_NOT_NULL(descriptor);
CHECK_EQ(1, descriptor->Length());
@@ -5654,11 +5642,7 @@ TEST(DuplicateProtoNoError) {
NULL
};
- static const ParserFlag always_flags[] = {
- kAllowHarmonyComputedPropertyNames,
- };
- RunParserSyncTest(context_data, error_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, error_data, kSuccess);
}
@@ -5707,8 +5691,8 @@ void TestLanguageMode(const char* source,
parser.set_allow_strong_mode(true);
info.set_global();
parser.Parse(&info);
- CHECK(info.function() != NULL);
- CHECK_EQ(expected_language_mode, info.function()->language_mode());
+ CHECK(info.literal() != NULL);
+ CHECK_EQ(expected_language_mode, info.literal()->language_mode());
}
@@ -6397,7 +6381,6 @@ TEST(StrongModeFreeVariablesNotDeclared) {
TEST(DestructuringPositiveTests) {
i::FLAG_harmony_destructuring = true;
i::FLAG_harmony_arrow_functions = true;
- i::FLAG_harmony_computed_property_names = true;
const char* context_data[][2] = {{"'use strict'; let ", " = {};"},
{"var ", " = {};"},
@@ -6446,8 +6429,7 @@ TEST(DestructuringPositiveTests) {
"[a,,...rest]",
NULL};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyComputedPropertyNames,
- kAllowHarmonyArrowFunctions,
+ static const ParserFlag always_flags[] = {kAllowHarmonyArrowFunctions,
kAllowHarmonyDestructuring};
RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
arraysize(always_flags));
@@ -6457,9 +6439,7 @@ TEST(DestructuringPositiveTests) {
TEST(DestructuringNegativeTests) {
i::FLAG_harmony_destructuring = true;
i::FLAG_harmony_arrow_functions = true;
- i::FLAG_harmony_computed_property_names = true;
- static const ParserFlag always_flags[] = {kAllowHarmonyComputedPropertyNames,
- kAllowHarmonyArrowFunctions,
+ static const ParserFlag always_flags[] = {kAllowHarmonyArrowFunctions,
kAllowHarmonyDestructuring};
{ // All modes.
@@ -6509,6 +6489,8 @@ TEST(DestructuringNegativeTests) {
"false",
"1",
"'abc'",
+ "/abc/",
+ "`abc`",
"class {}",
"{+2 : x}",
"{-2 : x}",
@@ -6529,6 +6511,10 @@ TEST(DestructuringNegativeTests) {
"[...rest,...rest1]",
"[a,b,...rest,...rest1]",
"[a,,..rest,...rest1]",
+ "{ x : 3 }",
+ "{ x : 'foo' }",
+ "{ x : /foo/ }",
+ "{ x : `foo` }",
NULL};
// clang-format on
RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
@@ -6622,9 +6608,7 @@ TEST(DestructuringDisallowPatternsInForVarIn) {
TEST(DestructuringDuplicateParams) {
i::FLAG_harmony_destructuring = true;
i::FLAG_harmony_arrow_functions = true;
- i::FLAG_harmony_computed_property_names = true;
- static const ParserFlag always_flags[] = {kAllowHarmonyComputedPropertyNames,
- kAllowHarmonyArrowFunctions,
+ static const ParserFlag always_flags[] = {kAllowHarmonyArrowFunctions,
kAllowHarmonyDestructuring};
const char* context_data[][2] = {{"'use strict';", ""},
{"function outer() { 'use strict';", "}"},
@@ -6652,9 +6636,7 @@ TEST(DestructuringDuplicateParams) {
TEST(DestructuringDuplicateParamsSloppy) {
i::FLAG_harmony_destructuring = true;
i::FLAG_harmony_arrow_functions = true;
- i::FLAG_harmony_computed_property_names = true;
- static const ParserFlag always_flags[] = {kAllowHarmonyComputedPropertyNames,
- kAllowHarmonyArrowFunctions,
+ static const ParserFlag always_flags[] = {kAllowHarmonyArrowFunctions,
kAllowHarmonyDestructuring};
const char* context_data[][2] = {
{"", ""}, {"function outer() {", "}"}, {nullptr, nullptr}};
@@ -6677,9 +6659,7 @@ TEST(DestructuringDuplicateParamsSloppy) {
TEST(DestructuringDisallowPatternsInSingleParamArrows) {
i::FLAG_harmony_destructuring = true;
i::FLAG_harmony_arrow_functions = true;
- i::FLAG_harmony_computed_property_names = true;
- static const ParserFlag always_flags[] = {kAllowHarmonyComputedPropertyNames,
- kAllowHarmonyArrowFunctions,
+ static const ParserFlag always_flags[] = {kAllowHarmonyArrowFunctions,
kAllowHarmonyDestructuring};
const char* context_data[][2] = {{"'use strict';", ""},
{"function outer() { 'use strict';", "}"},
@@ -6702,10 +6682,9 @@ TEST(DestructuringDisallowPatternsInRestParams) {
i::FLAG_harmony_destructuring = true;
i::FLAG_harmony_arrow_functions = true;
i::FLAG_harmony_rest_parameters = true;
- i::FLAG_harmony_computed_property_names = true;
- static const ParserFlag always_flags[] = {
- kAllowHarmonyComputedPropertyNames, kAllowHarmonyArrowFunctions,
- kAllowHarmonyRestParameters, kAllowHarmonyDestructuring};
+ static const ParserFlag always_flags[] = {kAllowHarmonyArrowFunctions,
+ kAllowHarmonyRestParameters,
+ kAllowHarmonyDestructuring};
const char* context_data[][2] = {{"'use strict';", ""},
{"function outer() { 'use strict';", "}"},
{"", ""},
@@ -6827,7 +6806,7 @@ TEST(NewTarget) {
}
-TEST(LegacyConst) {
+TEST(ConstLegacy) {
// clang-format off
const char* context_data[][2] = {
{"", ""},
@@ -6845,9 +6824,58 @@ TEST(LegacyConst) {
};
// clang-format on
- static const ParserFlag always_flags[] = {kNoLegacyConst};
+ static const ParserFlag always_flags[] = {kNoLegacyConst};
RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
arraysize(always_flags));
RunParserSyncTest(context_data, data, kSuccess);
}
+
+
+TEST(ConstSloppy) {
+ // clang-format off
+ const char* context_data[][2] = {
+ {"", ""},
+ {"{", "}"},
+ {NULL, NULL}
+ };
+
+ const char* data[] = {
+ "const x = 1",
+ "for (const x = 1; x < 1; x++) {}",
+ "for (const x in {}) {}",
+ "for (const x of []) {}",
+ NULL
+ };
+ // clang-format on
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
+ kNoLegacyConst};
+ RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
+TEST(LetSloppy) {
+ // clang-format off
+ const char* context_data[][2] = {
+ {"", ""},
+ {"'use strict';", ""},
+ {"{", "}"},
+ {NULL, NULL}
+ };
+
+ const char* data[] = {
+ "let x",
+ "let x = 1",
+ "for (let x = 1; x < 1; x++) {}",
+ "for (let x in {}) {}",
+ "for (let x of []) {}",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
+ kAllowHarmonySloppyLet};
+ RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index e7fcbd10e0..f0b623f38c 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -32,55 +32,56 @@
#include "src/ast.h"
#include "src/char-predicates-inl.h"
-#include "src/jsregexp.h"
#include "src/ostreams.h"
#include "src/parser.h"
-#include "src/regexp-macro-assembler.h"
-#include "src/regexp-macro-assembler-irregexp.h"
+#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-macro-assembler-irregexp.h"
+#include "src/splay-tree-inl.h"
#include "src/string-stream.h"
#ifdef V8_INTERPRETED_REGEXP
-#include "src/interpreter-irregexp.h"
+#include "src/regexp/interpreter-irregexp.h"
#else // V8_INTERPRETED_REGEXP
#include "src/macro-assembler.h"
#if V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm.h" // NOLINT
#include "src/arm/macro-assembler-arm.h"
-#include "src/arm/regexp-macro-assembler-arm.h"
+#include "src/regexp/arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
-#include "src/arm64/regexp-macro-assembler-arm64.h"
+#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
#endif
#if V8_TARGET_ARCH_PPC
#include "src/ppc/assembler-ppc.h"
#include "src/ppc/macro-assembler-ppc.h"
-#include "src/ppc/regexp-macro-assembler-ppc.h"
+#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
#endif
#if V8_TARGET_ARCH_MIPS
#include "src/mips/assembler-mips.h"
#include "src/mips/macro-assembler-mips.h"
-#include "src/mips/regexp-macro-assembler-mips.h"
+#include "src/regexp/mips/regexp-macro-assembler-mips.h"
#endif
#if V8_TARGET_ARCH_MIPS64
#include "src/mips64/assembler-mips64.h"
#include "src/mips64/macro-assembler-mips64.h"
-#include "src/mips64/regexp-macro-assembler-mips64.h"
+#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
#endif
#if V8_TARGET_ARCH_X64
+#include "src/regexp/x64/regexp-macro-assembler-x64.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
-#include "src/x64/regexp-macro-assembler-x64.h"
#endif
#if V8_TARGET_ARCH_IA32
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
-#include "src/ia32/regexp-macro-assembler-ia32.h"
+#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
#endif
#if V8_TARGET_ARCH_X87
+#include "src/regexp/x87/regexp-macro-assembler-x87.h"
#include "src/x87/assembler-x87.h"
#include "src/x87/macro-assembler-x87.h"
-#include "src/x87/regexp-macro-assembler-x87.h"
#endif
#endif // V8_INTERPRETED_REGEXP
#include "test/cctest/cctest.h"
@@ -413,7 +414,7 @@ static void ExpectError(const char* input,
CcTest::i_isolate(), &zone, &reader, false, false, &result));
CHECK(result.tree == NULL);
CHECK(!result.error.is_null());
- SmartArrayPointer<char> str = result.error->ToCString(ALLOW_NULLS);
+ v8::base::SmartArrayPointer<char> str = result.error->ToCString(ALLOW_NULLS);
CHECK_EQ(0, strcmp(expected, str.get()));
}
diff --git a/deps/v8/test/cctest/test-reloc-info.cc b/deps/v8/test/cctest/test-reloc-info.cc
index 829fd24f4d..e4d9fed7ec 100644
--- a/deps/v8/test/cctest/test-reloc-info.cc
+++ b/deps/v8/test/cctest/test-reloc-info.cc
@@ -45,7 +45,7 @@ TEST(Positions) {
const int code_size = 10 * KB;
int relocation_info_size = 10 * KB;
const int buffer_size = code_size + relocation_info_size;
- SmartArrayPointer<byte> buffer(new byte[buffer_size]);
+ v8::base::SmartArrayPointer<byte> buffer(new byte[buffer_size]);
byte* pc = buffer.get();
byte* buffer_end = buffer.get() + buffer_size;
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 0bae94e219..bf36081201 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -33,7 +33,7 @@
#include "src/bootstrapper.h"
#include "src/compilation-cache.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/heap/spaces.h"
#include "src/objects.h"
#include "src/parser.h"
@@ -628,7 +628,11 @@ UNINITIALIZED_DEPENDENT_TEST(CustomContextDeserialization,
root =
deserializer.DeserializePartial(isolate, global_proxy,
&outdated_contexts).ToHandleChecked();
- CHECK_EQ(3, outdated_contexts->length());
+ if (FLAG_global_var_shortcuts) {
+ CHECK_EQ(5, outdated_contexts->length());
+ } else {
+ CHECK_EQ(3, outdated_contexts->length());
+ }
CHECK(root->IsContext());
Handle<Context> context = Handle<Context>::cast(root);
CHECK(context->global_proxy() == *global_proxy);
diff --git a/deps/v8/test/cctest/test-simd.cc b/deps/v8/test/cctest/test-simd.cc
new file mode 100644
index 0000000000..fd72b695ee
--- /dev/null
+++ b/deps/v8/test/cctest/test-simd.cc
@@ -0,0 +1,117 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/objects.h"
+#include "src/ostreams.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+#define FLOAT_TEST(type, lane_count) \
+ { \
+ float nan = std::numeric_limits<float>::quiet_NaN(); \
+ float lanes[lane_count] = {0}; \
+ Handle<type> a = factory->New##type(lanes); \
+ Handle<type> b = factory->New##type(lanes); \
+ CHECK(a->BitwiseEquals(*b)); \
+ CHECK(a->SameValue(*b)); \
+ CHECK(a->SameValueZero(*b)); \
+ CHECK_EQ(a->Hash(), b->Hash()); \
+ for (int i = 0; i < lane_count; i++) { \
+ a->set_lane(i, -0.0); \
+ CHECK(!a->BitwiseEquals(*b)); \
+ CHECK_NE(a->Hash(), b->Hash()); \
+ CHECK(!a->SameValue(*b)); \
+ CHECK(a->SameValueZero(*b)); \
+ b->set_lane(i, -0.0); \
+ CHECK(a->BitwiseEquals(*b)); \
+ CHECK_EQ(a->Hash(), b->Hash()); \
+ CHECK(a->SameValue(*b)); \
+ CHECK(a->SameValueZero(*b)); \
+ a->set_lane(i, nan); \
+ CHECK(!a->BitwiseEquals(*b)); \
+ CHECK(!a->SameValue(*b)); \
+ CHECK(!a->SameValueZero(*b)); \
+ CHECK_NE(a->Hash(), b->Hash()); \
+ b->set_lane(i, nan); \
+ CHECK(a->BitwiseEquals(*b)); \
+ CHECK_EQ(a->Hash(), b->Hash()); \
+ CHECK(a->SameValue(*b)); \
+ CHECK(a->SameValueZero(*b)); \
+ } \
+ }
+
+#define INT_TEST(type, lane_count, lane_type) \
+ { \
+ lane_type lanes[lane_count] = {0}; \
+ Handle<type> a = factory->New##type(lanes); \
+ Handle<type> b = factory->New##type(lanes); \
+ CHECK(a->BitwiseEquals(*b)); \
+ CHECK(a->SameValue(*b)); \
+ CHECK(a->SameValueZero(*b)); \
+ CHECK_EQ(a->Hash(), b->Hash()); \
+ for (int i = 0; i < lane_count; i++) { \
+ a->set_lane(i, i + 1); \
+ CHECK(!a->BitwiseEquals(*b)); \
+ CHECK_NE(a->Hash(), b->Hash()); \
+ CHECK(!a->SameValue(*b)); \
+ CHECK(!a->SameValueZero(*b)); \
+ b->set_lane(i, i + 1); \
+ CHECK(a->BitwiseEquals(*b)); \
+ CHECK_EQ(a->Hash(), b->Hash()); \
+ CHECK(a->SameValue(*b)); \
+ CHECK(a->SameValueZero(*b)); \
+ a->set_lane(i, -(i + 1)); \
+ CHECK(!a->BitwiseEquals(*b)); \
+ CHECK_NE(a->Hash(), b->Hash()); \
+ CHECK(!a->SameValue(*b)); \
+ CHECK(!a->SameValueZero(*b)); \
+ b->set_lane(i, -(i + 1)); \
+ CHECK(a->BitwiseEquals(*b)); \
+ CHECK_EQ(a->Hash(), b->Hash()); \
+ CHECK(a->SameValue(*b)); \
+ CHECK(a->SameValueZero(*b)); \
+ } \
+ }
+
+#define BOOL_TEST(type, lane_count) \
+ { \
+ bool lanes[lane_count] = {false}; \
+ Handle<type> a = factory->New##type(lanes); \
+ Handle<type> b = factory->New##type(lanes); \
+ CHECK(a->BitwiseEquals(*b)); \
+ CHECK(a->SameValue(*b)); \
+ CHECK(a->SameValueZero(*b)); \
+ CHECK_EQ(a->Hash(), b->Hash()); \
+ for (int i = 0; i < lane_count; i++) { \
+ a->set_lane(i, true); \
+ CHECK(!a->BitwiseEquals(*b)); \
+ CHECK_NE(a->Hash(), b->Hash()); \
+ CHECK(!a->SameValue(*b)); \
+ CHECK(!a->SameValueZero(*b)); \
+ b->set_lane(i, true); \
+ CHECK(a->BitwiseEquals(*b)); \
+ CHECK_EQ(a->Hash(), b->Hash()); \
+ CHECK(a->SameValue(*b)); \
+ CHECK(a->SameValueZero(*b)); \
+ } \
+ }
+
+TEST(SimdTypes) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+
+ HandleScope sc(isolate);
+
+ FLOAT_TEST(Float32x4, 4)
+ INT_TEST(Int32x4, 4, int32_t)
+ BOOL_TEST(Bool32x4, 4)
+ INT_TEST(Int16x8, 8, int16_t)
+ BOOL_TEST(Bool16x8, 8)
+ INT_TEST(Int8x16, 16, int8_t)
+ BOOL_TEST(Bool8x16, 16)
+}
diff --git a/deps/v8/test/cctest/test-spaces.cc b/deps/v8/test/cctest/test-spaces.cc
index 3f5e437223..86500c52d3 100644
--- a/deps/v8/test/cctest/test-spaces.cc
+++ b/deps/v8/test/cctest/test-spaces.cc
@@ -309,7 +309,7 @@ TEST(MemoryAllocator) {
heap->MaxExecutableSize()));
int total_pages = 0;
- OldSpace faked_space(heap, heap->MaxReserved(), OLD_SPACE, NOT_EXECUTABLE);
+ OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
Page* first_page = memory_allocator->AllocatePage(
faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
@@ -379,8 +379,7 @@ TEST(OldSpace) {
heap->MaxExecutableSize()));
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
- OldSpace* s = new OldSpace(heap, heap->MaxOldGenerationSize(), OLD_SPACE,
- NOT_EXECUTABLE);
+ OldSpace* s = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
CHECK(s != NULL);
CHECK(s->SetUp());
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index d8d7c96871..ce60b95495 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -1085,8 +1085,9 @@ TEST(CachedHashOverflow) {
CHECK_EQ(results[i]->IsUndefined(), result->IsUndefined());
CHECK_EQ(results[i]->IsNumber(), result->IsNumber());
if (result->IsNumber()) {
- CHECK_EQ(Object::ToSmi(isolate, results[i]).ToHandleChecked()->value(),
- result->ToInt32(CcTest::isolate())->Value());
+ int32_t value = 0;
+ CHECK(results[i]->ToInt32(&value));
+ CHECK_EQ(value, result->ToInt32(CcTest::isolate())->Value());
}
}
}
@@ -1209,8 +1210,8 @@ TEST(SliceFromSlice) {
UNINITIALIZED_TEST(OneByteArrayJoin) {
v8::Isolate::CreateParams create_params;
// Set heap limits.
- create_params.constraints.set_max_semi_space_size(1);
- create_params.constraints.set_max_old_space_size(6);
+ create_params.constraints.set_max_semi_space_size(1 * Page::kPageSize / MB);
+ create_params.constraints.set_max_old_space_size(6 * Page::kPageSize / MB);
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
isolate->Enter();
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index 5f2cdae2a2..a9058a523a 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -32,88 +32,6 @@
#include "src/isolate.h"
-enum Turn { FILL_CACHE, CLEAN_CACHE, SECOND_TIME_FILL_CACHE, CACHE_DONE };
-
-static Turn turn = FILL_CACHE;
-
-
-class ThreadA : public v8::base::Thread {
- public:
- ThreadA() : Thread(Options("ThreadA")) {}
- void Run() {
- v8::Isolate* isolate = CcTest::isolate();
- v8::Locker locker(isolate);
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope scope(isolate);
- v8::Handle<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
-
- CHECK_EQ(FILL_CACHE, turn);
-
- // Fill String.search cache.
- v8::Handle<v8::Script> script = v8::Script::Compile(
- v8::String::NewFromUtf8(
- isolate,
- "for (var i = 0; i < 3; i++) {"
- " var result = \"a\".search(\"a\");"
- " if (result != 0) throw \"result: \" + result + \" @\" + i;"
- "};"
- "true"));
- CHECK(script->Run()->IsTrue());
-
- turn = CLEAN_CACHE;
- do {
- {
- v8::Unlocker unlocker(CcTest::isolate());
- }
- } while (turn != SECOND_TIME_FILL_CACHE);
-
- // Rerun the script.
- CHECK(script->Run()->IsTrue());
-
- turn = CACHE_DONE;
- }
-};
-
-
-class ThreadB : public v8::base::Thread {
- public:
- ThreadB() : Thread(Options("ThreadB")) {}
- void Run() {
- do {
- {
- v8::Isolate* isolate = CcTest::isolate();
- v8::Locker locker(isolate);
- v8::Isolate::Scope isolate_scope(isolate);
- if (turn == CLEAN_CACHE) {
- v8::HandleScope scope(isolate);
- v8::Handle<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
-
- // Clear the caches by forcing major GC.
- CcTest::heap()->CollectAllGarbage();
- turn = SECOND_TIME_FILL_CACHE;
- break;
- }
- }
- } while (true);
- }
-};
-
-
-TEST(JSFunctionResultCachesInTwoThreads) {
- ThreadA threadA;
- ThreadB threadB;
-
- threadA.Start();
- threadB.Start();
-
- threadA.Join();
- threadB.Join();
-
- CHECK_EQ(CACHE_DONE, turn);
-}
-
class ThreadIdValidationThread : public v8::base::Thread {
public:
ThreadIdValidationThread(v8::base::Thread* thread_to_start,
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 4746e47322..3a629bdca0 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -634,7 +634,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppend(
descriptors->Append(&f);
int field_index = f.GetDetails().field_index();
- bool is_inobject = field_index < map->inobject_properties();
+ bool is_inobject = field_index < map->GetInObjectProperties();
for (int bit = 0; bit < field_width_in_words; bit++) {
CHECK_EQ(is_inobject && (kind == PROP_DOUBLE),
!layout_descriptor->IsTagged(field_index + bit));
@@ -763,7 +763,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
int field_index = details.field_index();
int field_width_in_words = details.field_width_in_words();
- bool is_inobject = field_index < map->inobject_properties();
+ bool is_inobject = field_index < map->GetInObjectProperties();
for (int bit = 0; bit < field_width_in_words; bit++) {
CHECK_EQ(is_inobject && details.representation().IsDouble(),
!layout_desc->IsTagged(field_index + bit));
@@ -1017,7 +1017,7 @@ TEST(DoScavenge) {
INSERT_TRANSITION).ToHandleChecked();
// Create object in new space.
- Handle<JSObject> obj = factory->NewJSObjectFromMap(map, NOT_TENURED, false);
+ Handle<JSObject> obj = factory->NewJSObjectFromMap(map, NOT_TENURED);
Handle<HeapNumber> heap_number = factory->NewHeapNumber(42.5);
obj->WriteToField(0, *heap_number);
@@ -1094,7 +1094,7 @@ TEST(DoScavengeWithIncrementalWriteBarrier) {
}
// Create object in new space.
- Handle<JSObject> obj = factory->NewJSObjectFromMap(map, NOT_TENURED, false);
+ Handle<JSObject> obj = factory->NewJSObjectFromMap(map, NOT_TENURED);
Handle<HeapNumber> heap_number = factory->NewHeapNumber(42.5);
obj->WriteToField(0, *heap_number);
@@ -1351,7 +1351,7 @@ TEST(StoreBufferScanOnScavenge) {
INSERT_TRANSITION).ToHandleChecked();
// Create object in new space.
- Handle<JSObject> obj = factory->NewJSObjectFromMap(map, NOT_TENURED, false);
+ Handle<JSObject> obj = factory->NewJSObjectFromMap(map, NOT_TENURED);
Handle<HeapNumber> heap_number = factory->NewHeapNumber(42.5);
obj->WriteToField(0, *heap_number);
@@ -1423,9 +1423,6 @@ TEST(WriteBarriersInCopyJSObject) {
my_map = Map::CopyWithField(my_map, name, HeapType::Any(isolate), NONE,
Representation::Double(),
INSERT_TRANSITION).ToHandleChecked();
- my_map->set_pre_allocated_property_fields(1);
- int n_properties = my_map->InitialPropertiesLength();
- CHECK_GE(n_properties, 0);
int object_size = my_map->instance_size();
@@ -1503,7 +1500,7 @@ static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
Handle<HeapObject> obj_value;
{
AlwaysAllocateScope always_allocate(isolate);
- obj = factory->NewJSObjectFromMap(map, TENURED, false);
+ obj = factory->NewJSObjectFromMap(map, TENURED);
CHECK(old_space->Contains(*obj));
obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS);
@@ -1568,7 +1565,7 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
Page* ec_page;
{
AlwaysAllocateScope always_allocate(isolate);
- obj = factory->NewJSObjectFromMap(map, TENURED, false);
+ obj = factory->NewJSObjectFromMap(map, TENURED);
CHECK(old_space->Contains(*obj));
// Make sure |obj_value| is placed on an old-space evacuation candidate.
diff --git a/deps/v8/test/intl/number-format/check-minimum-fraction-digits.js b/deps/v8/test/intl/number-format/check-minimum-fraction-digits.js
new file mode 100755
index 0000000000..57e65be55e
--- /dev/null
+++ b/deps/v8/test/intl/number-format/check-minimum-fraction-digits.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Make sure minimumFractionDigits is honored
+
+var nf = new Intl.NumberFormat("en-us",{ useGrouping: false, minimumFractionDigits: 4});
+
+assertEquals("12345.6789", nf.format(12345.6789));
diff --git a/deps/v8/test/intl/number-format/format-currency.js b/deps/v8/test/intl/number-format/format-currency.js
new file mode 100755
index 0000000000..004c566ce4
--- /dev/null
+++ b/deps/v8/test/intl/number-format/format-currency.js
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Make sure currency formatting is correct (for USD only displays two decimal
+// places, for JPY 0, and for EUR 2).
+
+var nf_USD = new Intl.NumberFormat(['en'], {style: 'currency', currency: 'USD'});
+
+assertEquals("$54,306.40", nf_USD.format(parseFloat(54306.4047970)));
+
+var nf_JPY = new Intl.NumberFormat(['ja'],
+ {style: 'currency', currency: 'JPY', currencyDisplay: "code"});
+
+assertEquals("JPY54,306", nf_JPY.format(parseFloat(54306.4047970)));
+
+var nf_EUR = new Intl.NumberFormat(['pt'], {style: 'currency', currency: 'EUR'});
+
+assertEquals("€1.000,00", nf_EUR.format(1000.00));
diff --git a/deps/v8/test/intl/string/normalization.js b/deps/v8/test/intl/string/normalization.js
index 446d6277db..25d314ea28 100644
--- a/deps/v8/test/intl/string/normalization.js
+++ b/deps/v8/test/intl/string/normalization.js
@@ -27,6 +27,8 @@
// Tests the new String.prototype.normalize method.
+assertEquals(String.prototype.normalize.length, 0);
+assertEquals(String.prototype.propertyIsEnumerable("normalize"), false);
// Common use case when searching for 'not very exact' match.
// These are examples of data one might encounter in real use.
diff --git a/deps/v8/test/message/arrow-formal-parameters.js b/deps/v8/test/message/arrow-formal-parameters.js
new file mode 100644
index 0000000000..edc0c58053
--- /dev/null
+++ b/deps/v8/test/message/arrow-formal-parameters.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-arrow-functions
+
+(b, a, a, d) => a
diff --git a/deps/v8/test/message/arrow-formal-parameters.out b/deps/v8/test/message/arrow-formal-parameters.out
new file mode 100644
index 0000000000..ee918493f5
--- /dev/null
+++ b/deps/v8/test/message/arrow-formal-parameters.out
@@ -0,0 +1,4 @@
+*%(basename)s:7: SyntaxError: Duplicate parameter name not allowed in this context
+(b, a, a, d) => a
+ ^
+SyntaxError: Duplicate parameter name not allowed in this context
diff --git a/deps/v8/test/message/for-loop-invalid-lhs.js b/deps/v8/test/message/for-loop-invalid-lhs.js
new file mode 100644
index 0000000000..c545230348
--- /dev/null
+++ b/deps/v8/test/message/for-loop-invalid-lhs.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// TODO(adamk): Remove flag after the test runner tests all message tests with
+// the preparser: https://code.google.com/p/v8/issues/detail?id=4372
+// Flags: --min-preparse-length=0
+
+function f() { for ("unassignable" in {}); }
diff --git a/deps/v8/test/message/for-loop-invalid-lhs.out b/deps/v8/test/message/for-loop-invalid-lhs.out
new file mode 100644
index 0000000000..1972146f87
--- /dev/null
+++ b/deps/v8/test/message/for-loop-invalid-lhs.out
@@ -0,0 +1,4 @@
+*%(basename)s:9: SyntaxError: Invalid left-hand side in for-loop
+function f() { for ("unassignable" in {}); }
+ ^^^^^^^^^^^^^^
+SyntaxError: Invalid left-hand side in for-loop
diff --git a/deps/v8/test/message/new-target-assignment.js b/deps/v8/test/message/new-target-assignment.js
new file mode 100644
index 0000000000..f257d1a5b5
--- /dev/null
+++ b/deps/v8/test/message/new-target-assignment.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-new-target
+
+function f() { new.target = 5 }
diff --git a/deps/v8/test/message/new-target-assignment.out b/deps/v8/test/message/new-target-assignment.out
new file mode 100644
index 0000000000..5431bd0fc0
--- /dev/null
+++ b/deps/v8/test/message/new-target-assignment.out
@@ -0,0 +1,4 @@
+*%(basename)s:7: ReferenceError: Invalid left-hand side in assignment
+function f() { new.target = 5 }
+ ^^^^^^^^^^
+ReferenceError: Invalid left-hand side in assignment
diff --git a/deps/v8/test/message/new-target-for-loop.js b/deps/v8/test/message/new-target-for-loop.js
new file mode 100644
index 0000000000..76a8eecba2
--- /dev/null
+++ b/deps/v8/test/message/new-target-for-loop.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-new-target
+
+function f() { for (new.target in {}); }
diff --git a/deps/v8/test/message/new-target-for-loop.out b/deps/v8/test/message/new-target-for-loop.out
new file mode 100644
index 0000000000..342b1315e9
--- /dev/null
+++ b/deps/v8/test/message/new-target-for-loop.out
@@ -0,0 +1,4 @@
+*%(basename)s:7: SyntaxError: Invalid left-hand side in for-loop
+function f() { for (new.target in {}); }
+ ^^^^^^^^^^
+SyntaxError: Invalid left-hand side in for-loop
diff --git a/deps/v8/test/message/new-target-postfix-op.js b/deps/v8/test/message/new-target-postfix-op.js
new file mode 100644
index 0000000000..573f3ae3a7
--- /dev/null
+++ b/deps/v8/test/message/new-target-postfix-op.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-new-target
+
+function f() { new.target++ }
diff --git a/deps/v8/test/message/new-target-postfix-op.out b/deps/v8/test/message/new-target-postfix-op.out
new file mode 100644
index 0000000000..17f2081ed6
--- /dev/null
+++ b/deps/v8/test/message/new-target-postfix-op.out
@@ -0,0 +1,4 @@
+*%(basename)s:7: ReferenceError: Invalid left-hand side expression in postfix operation
+function f() { new.target++ }
+ ^^^^^^^^^^
+ReferenceError: Invalid left-hand side expression in postfix operation
diff --git a/deps/v8/test/message/new-target-prefix-op.js b/deps/v8/test/message/new-target-prefix-op.js
new file mode 100644
index 0000000000..ad2f98a46c
--- /dev/null
+++ b/deps/v8/test/message/new-target-prefix-op.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-new-target
+
+function f() { ++new.target }
diff --git a/deps/v8/test/message/new-target-prefix-op.out b/deps/v8/test/message/new-target-prefix-op.out
new file mode 100644
index 0000000000..19c847f23e
--- /dev/null
+++ b/deps/v8/test/message/new-target-prefix-op.out
@@ -0,0 +1,4 @@
+*%(basename)s:7: ReferenceError: Invalid left-hand side expression in prefix operation
+function f() { ++new.target }
+ ^^^^^^^^^^
+ReferenceError: Invalid left-hand side expression in prefix operation
diff --git a/deps/v8/test/message/strict-formal-parameters.out b/deps/v8/test/message/strict-formal-parameters.out
index 3648d38586..3ea3f233b7 100644
--- a/deps/v8/test/message/strict-formal-parameters.out
+++ b/deps/v8/test/message/strict-formal-parameters.out
@@ -1,4 +1,4 @@
-*%(basename)s:6: SyntaxError: Strict mode function may not have duplicate parameter names
+*%(basename)s:6: SyntaxError: Duplicate parameter name not allowed in this context
function foo(b, a, a, d) { return a }
^
-SyntaxError: Strict mode function may not have duplicate parameter names
+SyntaxError: Duplicate parameter name not allowed in this context
diff --git a/deps/v8/test/mjsunit/array-functions-prototype-misc.js b/deps/v8/test/mjsunit/array-functions-prototype-misc.js
index 74dc9a6be0..a2c1410837 100644
--- a/deps/v8/test/mjsunit/array-functions-prototype-misc.js
+++ b/deps/v8/test/mjsunit/array-functions-prototype-misc.js
@@ -312,3 +312,75 @@ Array.prototype[1] = undefined;
// Test http://code.google.com/p/chromium/issues/detail?id=21860
Array.prototype.push.apply([], [1].splice(0, -(-1 % 5)));
+
+
+// Check that the Array functions work also properly on non-Arrays
+var receiver;
+
+receiver = 'a string';
+assertThrows(function(){
+ Array.prototype.push.call(receiver);
+});
+
+receiver = 0;
+assertEquals(undefined, receiver.length);
+assertEquals(0, Array.prototype.push.call(receiver));
+assertEquals(1, Array.prototype.push.call(receiver, 'first'));
+assertEquals(undefined, receiver.length);
+
+receiver = {};
+assertEquals(undefined, receiver.length);
+assertEquals(0, Array.prototype.push.call(receiver));
+assertEquals(0, Array.prototype.push.call(receiver));
+assertEquals(0, receiver.length);
+assertEquals(1, Array.prototype.push.call(receiver, 'first'));
+assertEquals(1, receiver.length);
+assertEquals('first', receiver[0]);
+assertEquals(2, Array.prototype.push.call(receiver, 'second'));
+assertEquals(2, receiver.length);
+assertEquals('first', receiver[0]);
+assertEquals('second', receiver[1]);
+
+receiver = {'length': 10};
+assertEquals(10, Array.prototype.push.call(receiver));
+assertEquals(10, receiver.length);
+assertEquals(11, Array.prototype.push.call(receiver, 'first'));
+assertEquals(11, receiver.length);
+assertEquals('first', receiver[10]);
+assertEquals(13, Array.prototype.push.call(receiver, 'second', 'third'));
+assertEquals(13, receiver.length);
+assertEquals('first', receiver[10]);
+assertEquals('second', receiver[11]);
+assertEquals('third', receiver[12]);
+
+receiver = {
+ get length() { return 10; },
+ set length(l) {}
+};
+assertEquals(10, Array.prototype.push.call(receiver));
+assertEquals(10, receiver.length);
+assertEquals(11, Array.prototype.push.call(receiver, 'first'));
+assertEquals(10, receiver.length);
+assertEquals('first', receiver[10]);
+assertEquals(12, Array.prototype.push.call(receiver, 'second', 'third'));
+assertEquals(10, receiver.length);
+assertEquals('second', receiver[10]);
+assertEquals('third', receiver[11]);
+
+// readonly length
+receiver = {
+ get length() { return 10; },
+};
+assertThrows(function(){
+ Array.prototype.push.call(receiver);
+});
+
+receiver = {
+ set length(l) {}
+};
+assertEquals(0, Array.prototype.push.call(receiver));
+assertEquals(undefined, receiver.length);
+assertEquals(1, Array.prototype.push.call(receiver, 'first'));
+assertEquals(undefined, receiver.length);
+assertEquals(2, Array.prototype.push.call(receiver, 'third', 'second'));
+assertEquals(undefined, receiver.length);
diff --git a/deps/v8/test/mjsunit/array-push7.js b/deps/v8/test/mjsunit/array-push7.js
index b45a739d70..68c3a2a76e 100644
--- a/deps/v8/test/mjsunit/array-push7.js
+++ b/deps/v8/test/mjsunit/array-push7.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-object-observe
// Flags: --allow-natives-syntax
var v = 0;
diff --git a/deps/v8/test/mjsunit/compiler/string-length.js b/deps/v8/test/mjsunit/compiler/string-length.js
new file mode 100644
index 0000000000..855a1a6b71
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/string-length.js
@@ -0,0 +1,31 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+assertEquals(0, "".length);
+assertEquals(1, "a".length);
+assertEquals(2, ("a" + "b").length);
+
+function id(x) { return x; }
+
+function f1(x) {
+ return x.length;
+}
+assertEquals(0, f1(""));
+assertEquals(1, f1("a"));
+%OptimizeFunctionOnNextCall(f1);
+assertEquals(2, f1("a" + "b"));
+assertEquals(3, f1(id("a") + id("b" + id("c"))))
+
+function f2(x, y, z) {
+ x = x ? "" + y : "" + z;
+ return x.length;
+}
+assertEquals(0, f2(true, "", "a"));
+assertEquals(1, f2(false, "", "a"));
+%OptimizeFunctionOnNextCall(f2);
+assertEquals(0, f2(true, "", "a"));
+assertEquals(1, f2(false, "", "a"));
+assertEquals(3, f2(true, id("a") + id("b" + id("c")), ""));
diff --git a/deps/v8/test/mjsunit/compiler/stubs/floor-stub.js b/deps/v8/test/mjsunit/compiler/stubs/floor-stub.js
index e3fc9b6003..0a76d307ba 100644
--- a/deps/v8/test/mjsunit/compiler/stubs/floor-stub.js
+++ b/deps/v8/test/mjsunit/compiler/stubs/floor-stub.js
@@ -25,30 +25,37 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --expose-natives-as=builtins --noalways-opt
+// Flags: --allow-natives-syntax --noalways-opt --turbo-filter=*
+
+var stubs = %GetCodeStubExportsObject();
const kExtraTypeFeedbackMinusZeroSentinel = 1;
+const kFirstJSFunctionTypeFeedbackIndex = 5;
const kFirstSlotExtraTypeFeedbackIndex = 5;
-(function(){
- var floorFunc = function() {
- Math.floor(NaN);
+(function() {
+ var stub1 = stubs.MathFloorStub("MathFloorStub", 1);
+ var tempForTypeVector = function(d) {
+ return Math.round(d);
}
- // Execute the function once to make sure it has a type feedback vector.
- floorFunc(5);
- var stub = builtins.MathFloorStub("MathFloorStub", 0);
+ tempForTypeVector(5);
+ var tv = %GetTypeFeedbackVector(tempForTypeVector);
+ var floorFunc1 = function(v, first) {
+ if (first) return;
+ return stub1(stub1, kFirstSlotExtraTypeFeedbackIndex - 1, tv, undefined, v);
+ };
+ %OptimizeFunctionOnNextCall(stub1);
+ floorFunc1(5, true);
+ %FixedArraySet(tv, kFirstSlotExtraTypeFeedbackIndex - 1, stub1);
assertTrue(kExtraTypeFeedbackMinusZeroSentinel !==
- %FixedArrayGet(%GetTypeFeedbackVector(floorFunc),
- kFirstSlotExtraTypeFeedbackIndex));
- assertEquals(5.0, stub(floorFunc, 4, 5.5));
+ %FixedArrayGet(tv, kFirstSlotExtraTypeFeedbackIndex));
+ assertEquals(5.0, floorFunc1(5.5));
assertTrue(kExtraTypeFeedbackMinusZeroSentinel !==
- %FixedArrayGet(%GetTypeFeedbackVector(floorFunc),
- kFirstSlotExtraTypeFeedbackIndex));
+ %FixedArrayGet(tv, kFirstSlotExtraTypeFeedbackIndex));
// Executing floor such that it returns -0 should set the proper sentinel in
// the feedback vector.
- assertEquals(-Infinity, 1/stub(floorFunc, 4, -0));
+ assertEquals(-Infinity, 1/floorFunc1(-0));
assertEquals(kExtraTypeFeedbackMinusZeroSentinel,
- %FixedArrayGet(%GetTypeFeedbackVector(floorFunc),
- kFirstSlotExtraTypeFeedbackIndex));
- %ClearFunctionTypeFeedback(floorFunc);
+ %FixedArrayGet(tv, kFirstSlotExtraTypeFeedbackIndex));
+ %ClearFunctionTypeFeedback(floorFunc1);
})();
diff --git a/deps/v8/test/mjsunit/d8-worker-sharedarraybuffer.js b/deps/v8/test/mjsunit/d8-worker-sharedarraybuffer.js
index 791529fc89..d432f97074 100644
--- a/deps/v8/test/mjsunit/d8-worker-sharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/d8-worker-sharedarraybuffer.js
@@ -27,42 +27,80 @@
// Flags: --harmony-sharedarraybuffer --harmony-atomics
-var workerScript =
- `onmessage = function(m) {
- var sab = m;
- var ta = new Uint32Array(sab);
- if (sab.byteLength !== 16) {
- throw new Error('SharedArrayBuffer transfer byteLength');
- }
- for (var i = 0; i < 4; ++i) {
- if (ta[i] !== i) {
- throw new Error('SharedArrayBuffer transfer value ' + i);
- }
- }
- // Atomically update ta[0]
- Atomics.store(ta, 0, 100);
- };`;
-
if (this.Worker) {
- var w = new Worker(workerScript);
- var sab = new SharedArrayBuffer(16);
- var ta = new Uint32Array(sab);
- for (var i = 0; i < 4; ++i) {
- ta[i] = i;
- }
+ (function TestTransfer() {
+ var workerScript =
+ `onmessage = function(m) {
+ var sab = m;
+ var ta = new Uint32Array(sab);
+ if (sab.byteLength !== 16) {
+ throw new Error('SharedArrayBuffer transfer byteLength');
+ }
+ for (var i = 0; i < 4; ++i) {
+ if (ta[i] !== i) {
+ throw new Error('SharedArrayBuffer transfer value ' + i);
+ }
+ }
+ // Atomically update ta[0]
+ Atomics.store(ta, 0, 100);
+ };`;
+
+ var w = new Worker(workerScript);
+
+ var sab = new SharedArrayBuffer(16);
+ var ta = new Uint32Array(sab);
+ for (var i = 0; i < 4; ++i) {
+ ta[i] = i;
+ }
+
+ // Transfer SharedArrayBuffer
+ w.postMessage(sab, [sab]);
+ assertEquals(16, sab.byteLength); // ArrayBuffer should not be neutered.
+
+ // Spinwait for the worker to update ta[0]
+ var ta0;
+ while ((ta0 = Atomics.load(ta, 0)) == 0) {}
+
+ assertEquals(100, ta0);
+
+ w.terminate();
+
+ assertEquals(16, sab.byteLength); // Still not neutered.
+ })();
- // Transfer SharedArrayBuffer
- w.postMessage(sab, [sab]);
- assertEquals(16, sab.byteLength); // ArrayBuffer should not be neutered.
+ (function TestTransferMulti() {
+ var workerScript =
+ `onmessage = function(msg) {
+ var sab = msg.sab;
+ var id = msg.id;
+ var ta = new Uint32Array(sab);
+ Atomics.store(ta, id, 1);
+ postMessage(id);
+ };`;
- // Spinwait for the worker to update ta[0]
- var ta0;
- while ((ta0 = Atomics.load(ta, 0)) == 0) {}
+ var sab = new SharedArrayBuffer(16);
+ var ta = new Uint32Array(sab);
- assertEquals(100, ta0);
+ var id;
+ var workers = [];
+ for (id = 0; id < 4; ++id) {
+ workers[id] = new Worker(workerScript);
+ workers[id].postMessage({sab: sab, id: id}, [sab]);
+ }
- w.terminate();
+ // Spinwait for each worker to update ta[id]
+ var count = 0;
+ while (count < 4) {
+ for (id = 0; id < 4; ++id) {
+ if (Atomics.compareExchange(ta, id, 1, -1) == 1) {
+ // Worker is finished.
+ assertEquals(id, workers[id].getMessage());
+ workers[id].terminate();
+ count++;
+ }
+ }
+ }
+ })();
- assertEquals(16, sab.byteLength); // Still not neutered.
}
diff --git a/deps/v8/test/mjsunit/date-parse.js b/deps/v8/test/mjsunit/date-parse.js
index cb4a951c7a..4cd8aa9c3e 100644
--- a/deps/v8/test/mjsunit/date-parse.js
+++ b/deps/v8/test/mjsunit/date-parse.js
@@ -244,14 +244,22 @@ var testCasesES5Misc = [
['2000-01T08:00:00.001Z', 946713600001],
['2000-01T08:00:00.099Z', 946713600099],
['2000-01T08:00:00.999Z', 946713600999],
- ['2000-01T00:00:00.001-08:00', 946713600001]];
+ ['2000-01T00:00:00.001-08:00', 946713600001],
+ ['2000-01-01T24:00Z', 946771200000],
+ ['2000-01-01T24:00:00Z', 946771200000],
+ ['2000-01-01T24:00:00.000Z', 946771200000],
+ ['2000-01-01T24:00:00.000Z', 946771200000]];
var testCasesES5MiscNegative = [
'2000-01-01TZ',
'2000-01-01T60Z',
'2000-01-01T60:60Z',
'2000-01-0108:00Z',
- '2000-01-01T08Z'];
+ '2000-01-01T08Z',
+ '2000-01-01T24:01',
+ '2000-01-01T24:00:01',
+ '2000-01-01T24:00:00.001',
+ '2000-01-01T24:00:00.999Z'];
// Run all the tests.
diff --git a/deps/v8/test/mjsunit/date.js b/deps/v8/test/mjsunit/date.js
index 0fa23f8de1..adebbd141f 100644
--- a/deps/v8/test/mjsunit/date.js
+++ b/deps/v8/test/mjsunit/date.js
@@ -203,110 +203,110 @@ assertEquals(-8640000000000000, Date.UTC(1970, 0, 1 - 100000001, 24));
// Parsing ES5 ISO-8601 dates.
-// When TZ is omitted, it defaults to 'Z' meaning UTC.
+// When TZ is omitted, it defaults to the local timezone
// Check epoch.
assertEquals(0, Date.parse("1970-01-01T00:00:00.000+00:00"));
assertEquals(0, Date.parse("1970-01-01T00:00:00.000-00:00"));
assertEquals(0, Date.parse("1970-01-01T00:00:00.000Z"));
-assertEquals(0, Date.parse("1970-01-01T00:00:00.000"));
-assertEquals(0, Date.parse("1970-01-01T00:00:00"));
-assertEquals(0, Date.parse("1970-01-01T00:00"));
-assertEquals(0, Date.parse("1970-01-01"));
+assertEquals(0, Date.parse("1970-01-01T00:00:00.000Z"));
+assertEquals(0, Date.parse("1970-01-01T00:00:00Z"));
+assertEquals(0, Date.parse("1970-01-01T00:00Z"));
+assertEquals(0, Date.parse("1970-01-01Z"));
assertEquals(0, Date.parse("1970-01T00:00:00.000+00:00"));
assertEquals(0, Date.parse("1970-01T00:00:00.000-00:00"));
assertEquals(0, Date.parse("1970-01T00:00:00.000Z"));
-assertEquals(0, Date.parse("1970-01T00:00:00.000"));
-assertEquals(0, Date.parse("1970-01T00:00:00"));
-assertEquals(0, Date.parse("1970-01T00:00"));
-assertEquals(0, Date.parse("1970-01"));
+assertEquals(0, Date.parse("1970-01T00:00:00.000Z"));
+assertEquals(0, Date.parse("1970-01T00:00:00Z"));
+assertEquals(0, Date.parse("1970-01T00:00Z"));
+assertEquals(0, Date.parse("1970-01Z"));
assertEquals(0, Date.parse("1970T00:00:00.000+00:00"));
assertEquals(0, Date.parse("1970T00:00:00.000-00:00"));
assertEquals(0, Date.parse("1970T00:00:00.000Z"));
-assertEquals(0, Date.parse("1970T00:00:00.000"));
-assertEquals(0, Date.parse("1970T00:00:00"));
-assertEquals(0, Date.parse("1970T00:00"));
-assertEquals(0, Date.parse("1970"));
+assertEquals(0, Date.parse("1970T00:00:00.000Z"));
+assertEquals(0, Date.parse("1970T00:00:00Z"));
+assertEquals(0, Date.parse("1970T00:00Z"));
+assertEquals(0, Date.parse("1970Z"));
assertEquals(0, Date.parse("+001970-01-01T00:00:00.000+00:00"));
assertEquals(0, Date.parse("+001970-01-01T00:00:00.000-00:00"));
assertEquals(0, Date.parse("+001970-01-01T00:00:00.000Z"));
-assertEquals(0, Date.parse("+001970-01-01T00:00:00.000"));
-assertEquals(0, Date.parse("+001970-01-01T00:00:00"));
-assertEquals(0, Date.parse("+001970-01-01T00:00"));
-assertEquals(0, Date.parse("+001970-01-01"));
+assertEquals(0, Date.parse("+001970-01-01T00:00:00.000Z"));
+assertEquals(0, Date.parse("+001970-01-01T00:00:00Z"));
+assertEquals(0, Date.parse("+001970-01-01T00:00Z"));
+assertEquals(0, Date.parse("+001970-01-01Z"));
assertEquals(0, Date.parse("+001970-01T00:00:00.000+00:00"));
assertEquals(0, Date.parse("+001970-01T00:00:00.000-00:00"));
assertEquals(0, Date.parse("+001970-01T00:00:00.000Z"));
-assertEquals(0, Date.parse("+001970-01T00:00:00.000"));
-assertEquals(0, Date.parse("+001970-01T00:00:00"));
-assertEquals(0, Date.parse("+001970-01T00:00"));
-assertEquals(0, Date.parse("+001970-01"));
+assertEquals(0, Date.parse("+001970-01T00:00:00.000Z"));
+assertEquals(0, Date.parse("+001970-01T00:00:00Z"));
+assertEquals(0, Date.parse("+001970-01T00:00Z"));
+assertEquals(0, Date.parse("+001970-01Z"));
assertEquals(0, Date.parse("+001970T00:00:00.000+00:00"));
assertEquals(0, Date.parse("+001970T00:00:00.000-00:00"));
assertEquals(0, Date.parse("+001970T00:00:00.000Z"));
-assertEquals(0, Date.parse("+001970T00:00:00.000"));
-assertEquals(0, Date.parse("+001970T00:00:00"));
-assertEquals(0, Date.parse("+001970T00:00"));
-assertEquals(0, Date.parse("+001970"));
+assertEquals(0, Date.parse("+001970T00:00:00.000Z"));
+assertEquals(0, Date.parse("+001970T00:00:00Z"));
+assertEquals(0, Date.parse("+001970T00:00Z"));
+assertEquals(0, Date.parse("+001970Z"));
// Check random date.
assertEquals(70671003500, Date.parse("1972-03-28T23:50:03.500+01:00"));
assertEquals(70674603500, Date.parse("1972-03-28T23:50:03.500Z"));
-assertEquals(70674603500, Date.parse("1972-03-28T23:50:03.500"));
-assertEquals(70674603000, Date.parse("1972-03-28T23:50:03"));
-assertEquals(70674600000, Date.parse("1972-03-28T23:50"));
-assertEquals(70588800000, Date.parse("1972-03-28"));
+assertEquals(70674603500, Date.parse("1972-03-28T23:50:03.500Z"));
+assertEquals(70674603000, Date.parse("1972-03-28T23:50:03Z"));
+assertEquals(70674600000, Date.parse("1972-03-28T23:50Z"));
+assertEquals(70588800000, Date.parse("1972-03-28Z"));
assertEquals(68338203500, Date.parse("1972-03T23:50:03.500+01:00"));
assertEquals(68341803500, Date.parse("1972-03T23:50:03.500Z"));
-assertEquals(68341803500, Date.parse("1972-03T23:50:03.500"));
-assertEquals(68341803000, Date.parse("1972-03T23:50:03"));
-assertEquals(68341800000, Date.parse("1972-03T23:50"));
-assertEquals(68256000000, Date.parse("1972-03"));
+assertEquals(68341803500, Date.parse("1972-03T23:50:03.500Z"));
+assertEquals(68341803000, Date.parse("1972-03T23:50:03Z"));
+assertEquals(68341800000, Date.parse("1972-03T23:50Z"));
+assertEquals(68256000000, Date.parse("1972-03Z"));
assertEquals(63154203500, Date.parse("1972T23:50:03.500+01:00"));
assertEquals(63157803500, Date.parse("1972T23:50:03.500Z"));
-assertEquals(63157803500, Date.parse("1972T23:50:03.500"));
-assertEquals(63157803000, Date.parse("1972T23:50:03"));
-assertEquals(63072000000, Date.parse("1972"));
+assertEquals(63157803500, Date.parse("1972T23:50:03.500Z"));
+assertEquals(63157803000, Date.parse("1972T23:50:03Z"));
+assertEquals(63072000000, Date.parse("1972Z"));
assertEquals(70671003500, Date.parse("+001972-03-28T23:50:03.500+01:00"));
assertEquals(70674603500, Date.parse("+001972-03-28T23:50:03.500Z"));
-assertEquals(70674603500, Date.parse("+001972-03-28T23:50:03.500"));
-assertEquals(70674603000, Date.parse("+001972-03-28T23:50:03"));
-assertEquals(70674600000, Date.parse("+001972-03-28T23:50"));
-assertEquals(70588800000, Date.parse("+001972-03-28"));
+assertEquals(70674603500, Date.parse("+001972-03-28T23:50:03.500Z"));
+assertEquals(70674603000, Date.parse("+001972-03-28T23:50:03Z"));
+assertEquals(70674600000, Date.parse("+001972-03-28T23:50Z"));
+assertEquals(70588800000, Date.parse("+001972-03-28Z"));
assertEquals(68338203500, Date.parse("+001972-03T23:50:03.500+01:00"));
assertEquals(68341803500, Date.parse("+001972-03T23:50:03.500Z"));
-assertEquals(68341803500, Date.parse("+001972-03T23:50:03.500"));
-assertEquals(68341803000, Date.parse("+001972-03T23:50:03"));
-assertEquals(68341800000, Date.parse("+001972-03T23:50"));
-assertEquals(68256000000, Date.parse("+001972-03"));
+assertEquals(68341803500, Date.parse("+001972-03T23:50:03.500Z"));
+assertEquals(68341803000, Date.parse("+001972-03T23:50:03Z"));
+assertEquals(68341800000, Date.parse("+001972-03T23:50Z"));
+assertEquals(68256000000, Date.parse("+001972-03Z"));
assertEquals(63154203500, Date.parse("+001972T23:50:03.500+01:00"));
assertEquals(63157803500, Date.parse("+001972T23:50:03.500Z"));
-assertEquals(63157803500, Date.parse("+001972T23:50:03.500"));
-assertEquals(63157803000, Date.parse("+001972T23:50:03"));
-assertEquals(63072000000, Date.parse("+001972"));
+assertEquals(63157803500, Date.parse("+001972T23:50:03.500Z"));
+assertEquals(63157803000, Date.parse("+001972T23:50:03Z"));
+assertEquals(63072000000, Date.parse("+001972Z"));
// Ensure that ISO-years in the range 00-99 aren't translated to the range
// 1950..2049.
-assertEquals(-60904915200000, Date.parse("0040-01-01"));
-assertEquals(-60273763200000, Date.parse("0060-01-01"));
-assertEquals(-62167219200000, Date.parse("0000-01-01"));
-assertEquals(-62167219200000, Date.parse("+000000-01-01"));
+assertEquals(-60904915200000, Date.parse("0040-01-01T00:00Z"));
+assertEquals(-60273763200000, Date.parse("0060-01-01T00:00Z"));
+assertEquals(-62167219200000, Date.parse("0000-01-01T00:00Z"));
+assertEquals(-62167219200000, Date.parse("+000000-01-01T00:00Z"));
// Test negative years.
-assertEquals(-63429523200000, Date.parse("-000040-01-01"));
-assertEquals(-64060675200000, Date.parse("-000060-01-01"));
-assertEquals(-124397510400000, Date.parse("-001972-01-01"));
+assertEquals(-63429523200000, Date.parse("-000040-01-01Z"));
+assertEquals(-64060675200000, Date.parse("-000060-01-01Z"));
+assertEquals(-124397510400000, Date.parse("-001972-01-01Z"));
// Check time-zones.
assertEquals(70674603500, Date.parse("1972-03-28T23:50:03.500Z"));
diff --git a/deps/v8/test/mjsunit/debug-evaluate.js b/deps/v8/test/mjsunit/debug-evaluate.js
index accf656d60..46eddef9c4 100644
--- a/deps/v8/test/mjsunit/debug-evaluate.js
+++ b/deps/v8/test/mjsunit/debug-evaluate.js
@@ -136,6 +136,7 @@ function f() {
function g() {
var a = 2;
f();
+ return a; // Use the value to prevent it being removed by DCE.
};
a = 1;
diff --git a/deps/v8/test/mjsunit/debug-function-scopes.js b/deps/v8/test/mjsunit/debug-function-scopes.js
index 8992fe79c5..fac3b16b8d 100644
--- a/deps/v8/test/mjsunit/debug-function-scopes.js
+++ b/deps/v8/test/mjsunit/debug-function-scopes.js
@@ -42,7 +42,7 @@ function CheckScope(scope_mirror, scope_expectations, expected_scope_type) {
}
}
-// A copy of the scope types from mirror-debugger.js.
+// A copy of the scope types from debug/mirrors.js.
var ScopeType = { Global: 0,
Local: 1,
With: 2,
diff --git a/deps/v8/test/mjsunit/debug-liveedit-restart-frame.js b/deps/v8/test/mjsunit/debug-liveedit-restart-frame.js
index d978a9709f..a3182d7bfa 100644
--- a/deps/v8/test/mjsunit/debug-liveedit-restart-frame.js
+++ b/deps/v8/test/mjsunit/debug-liveedit-restart-frame.js
@@ -97,8 +97,9 @@ function TestCase(test_scenario, expected_output) {
return;
}
var frame = FindCallFrame(exec_state, change_code);
- // Throws if fails.
- Debug.LiveEdit.RestartFrame(frame);
+ var error = frame.restart();
+ if (typeof error === 'string')
+ throw new Error(error);
}
}
diff --git a/deps/v8/test/mjsunit/debug-materialized.js b/deps/v8/test/mjsunit/debug-materialized.js
new file mode 100644
index 0000000000..0b01b78df4
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-materialized.js
@@ -0,0 +1,41 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+function dbg(x) {
+ debugger;
+}
+
+function foo() {
+ arguments[0];
+ dbg();
+}
+
+function bar() {
+ var t = { a : 1 };
+ dbg();
+ return t.a;
+}
+
+foo(1);
+foo(1);
+bar(1);
+bar(1);
+%OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionOnNextCall(bar);
+
+var Debug = debug.Debug;
+Debug.setListener(function(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ for (var i = 0; i < exec_state.frameCount(); i++) {
+ var f = exec_state.frame(i);
+ for (var j = 0; j < f.localCount(); j++) {
+ print("'" + f.localName(j) + "' = " + f.localValue(j).value());
+ }
+ }
+});
+
+foo(1);
+bar(1);
diff --git a/deps/v8/test/mjsunit/debug-mirror-cache.js b/deps/v8/test/mjsunit/debug-mirror-cache.js
index c690aa0133..8ac6d9a70d 100644
--- a/deps/v8/test/mjsunit/debug-mirror-cache.js
+++ b/deps/v8/test/mjsunit/debug-mirror-cache.js
@@ -51,8 +51,7 @@ function listener(event, exec_state, event_data, data) {
listenerCallCount++;
// Check that mirror cache is cleared when entering debugger.
- assertEquals(0, debug.next_handle_, "Mirror cache not cleared");
- assertEquals(0, debug.mirror_cache_.length, "Mirror cache not cleared");
+ assertTrue(debug.MirrorCacheIsEmpty(), "Mirror cache not cleared");
// Get the debug command processor in paused state.
var dcp = exec_state.debugCommandProcessor(false);
@@ -66,8 +65,7 @@ function listener(event, exec_state, event_data, data) {
Debug.scripts();
// Some mirrors where cached.
- assertFalse(debug.next_handle_ == 0, "Mirror cache not used");
- assertFalse(debug.mirror_cache_.length == 0, "Mirror cache not used");
+ assertFalse(debug.MirrorCacheIsEmpty(), "Mirror cache not used");
}
} catch (e) {
print(e);
diff --git a/deps/v8/test/mjsunit/debug-optimize.js b/deps/v8/test/mjsunit/debug-optimize.js
new file mode 100644
index 0000000000..d1ce63d5a0
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-optimize.js
@@ -0,0 +1,54 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --use-inlining
+
+var Debug = debug.Debug;
+
+function f1() {
+ return 1;
+}
+
+function f2() {
+ return 2;
+}
+
+function f3() {
+ return f1();
+}
+
+function f4() {
+ return 4;
+}
+
+
+function optimize(f) {
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+}
+
+optimize(f1);
+optimize(f2);
+optimize(f3);
+
+Debug.setListener(function() {});
+
+assertOptimized(f1);
+assertOptimized(f2);
+assertOptimized(f3);
+
+Debug.setBreakPoint(f1, 1);
+
+// Setting break point deoptimizes f1 and f3 (which inlines f1).
+assertUnoptimized(f1);
+assertOptimized(f2);
+assertUnoptimized(f3);
+
+// We can optimize with break points set.
+optimize(f4);
+assertOptimized(f4);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-return-value.js b/deps/v8/test/mjsunit/debug-return-value.js
index 02d6a7cbc9..3ea106c40f 100644
--- a/deps/v8/test/mjsunit/debug-return-value.js
+++ b/deps/v8/test/mjsunit/debug-return-value.js
@@ -124,6 +124,7 @@ function listener(event, exec_state, event_data, data) {
}
} catch (e) {
exception = e
+ print(e + e.stack)
};
};
diff --git a/deps/v8/test/mjsunit/debug-script-breakpoints.js b/deps/v8/test/mjsunit/debug-script-breakpoints.js
index d4ce6dc98b..6f31ef11d8 100644
--- a/deps/v8/test/mjsunit/debug-script-breakpoints.js
+++ b/deps/v8/test/mjsunit/debug-script-breakpoints.js
@@ -30,37 +30,51 @@
Debug = debug.Debug
Debug.setListener(function(){});
+var script_id;
+var script_name;
+
+// Get current script id and name.
+var scripts = Debug.scripts();
+for (var i = 0; i < scripts.length; i++) {
+ var name = scripts[i].name;
+ var id = scripts[i].id;
+ if (name !== undefined && name.includes("debug-script-breakpoints.js")) {
+ script_id = id;
+ script_name = name;
+ break;
+ }
+}
+assertTrue(script_id !== undefined);
+assertTrue(script_name !== undefined);
+print("#" + script_id + ": " + script_name);
+
+
+// Checks script name, line and column.
+var checkBreakPoint = function(id, line, column) {
+ var breakpoint = Debug.scriptBreakPoints()[id];
+ assertEquals(script_name, breakpoint.script_name());
+ assertEquals(line, breakpoint.line());
+ assertEquals(column, breakpoint.column());
+}
+
+
// Set and remove a script break point for a named script.
-var sbp = Debug.setScriptBreakPointByName("1", 2, 3);
+var sbp = Debug.setScriptBreakPointByName(script_name, 35, 5);
assertEquals(1, Debug.scriptBreakPoints().length);
-assertEquals("1", Debug.scriptBreakPoints()[0].script_name());
-assertEquals(2, Debug.scriptBreakPoints()[0].line());
-assertEquals(3, Debug.scriptBreakPoints()[0].column());
+checkBreakPoint(0, 35, 5);
Debug.clearBreakPoint(sbp);
assertEquals(0, Debug.scriptBreakPoints().length);
// Set three script break points for named scripts.
-var sbp1 = Debug.setScriptBreakPointByName("1", 2, 3);
-var sbp2 = Debug.setScriptBreakPointByName("2", 3, 4);
-var sbp3 = Debug.setScriptBreakPointByName("3", 4, 5);
+var sbp1 = Debug.setScriptBreakPointByName(script_name, 36, 3);
+var sbp2 = Debug.setScriptBreakPointByName(script_name, 37, 4);
+var sbp3 = Debug.setScriptBreakPointByName(script_name, 38, 5);
// Check the content of the script break points.
assertEquals(3, Debug.scriptBreakPoints().length);
-for (var i = 0; i < Debug.scriptBreakPoints().length; i++) {
- var x = Debug.scriptBreakPoints()[i];
- if ("1" == x.script_name()) {
- assertEquals(2, x.line());
- assertEquals(3, x.column());
- } else if ("2" == x.script_name()) {
- assertEquals(3, x.line());
- assertEquals(4, x.column());
- } else if ("3" == x.script_name()) {
- assertEquals(4, x.line());
- assertEquals(5, x.column());
- } else {
- assertUnreachable("unecpected script_name " + x.script_name());
- }
-}
+checkBreakPoint(0, 36, 3);
+checkBreakPoint(1, 37, 4);
+checkBreakPoint(2, 38, 5);
// Remove script break points (in another order than they where added).
assertEquals(3, Debug.scriptBreakPoints().length);
@@ -71,37 +85,33 @@ assertEquals(1, Debug.scriptBreakPoints().length);
Debug.clearBreakPoint(sbp2);
assertEquals(0, Debug.scriptBreakPoints().length);
+
+// Checks script id, line and column.
+var checkBreakPoint = function(id, line, column) {
+ var breakpoint = Debug.scriptBreakPoints()[id];
+ assertEquals(script_id, breakpoint.script_id());
+ assertEquals(line, breakpoint.line());
+ assertEquals(column, breakpoint.column());
+}
+
+
// Set and remove a script break point for a script id.
-var sbp = Debug.setScriptBreakPointById(1, 2, 3);
+var sbp = Debug.setScriptBreakPointById(script_id, 40, 6);
assertEquals(1, Debug.scriptBreakPoints().length);
-assertEquals(1, Debug.scriptBreakPoints()[0].script_id());
-assertEquals(2, Debug.scriptBreakPoints()[0].line());
-assertEquals(3, Debug.scriptBreakPoints()[0].column());
+checkBreakPoint(0, 40, 6);
Debug.clearBreakPoint(sbp);
assertEquals(0, Debug.scriptBreakPoints().length);
// Set three script break points for script ids.
-var sbp1 = Debug.setScriptBreakPointById(1, 2, 3);
-var sbp2 = Debug.setScriptBreakPointById(2, 3, 4);
-var sbp3 = Debug.setScriptBreakPointById(3, 4, 5);
+var sbp1 = Debug.setScriptBreakPointById(script_id, 42, 3);
+var sbp2 = Debug.setScriptBreakPointById(script_id, 43, 4);
+var sbp3 = Debug.setScriptBreakPointById(script_id, 44, 5);
// Check the content of the script break points.
assertEquals(3, Debug.scriptBreakPoints().length);
-for (var i = 0; i < Debug.scriptBreakPoints().length; i++) {
- var x = Debug.scriptBreakPoints()[i];
- if (1 == x.script_id()) {
- assertEquals(2, x.line());
- assertEquals(3, x.column());
- } else if (2 == x.script_id()) {
- assertEquals(3, x.line());
- assertEquals(4, x.column());
- } else if (3 == x.script_id()) {
- assertEquals(4, x.line());
- assertEquals(5, x.column());
- } else {
- assertUnreachable("unecpected script_id " + x.script_id());
- }
-}
+checkBreakPoint(0, 42, 3);
+checkBreakPoint(1, 43, 4);
+checkBreakPoint(2, 44, 5);
// Remove script break points (in another order than they where added).
assertEquals(3, Debug.scriptBreakPoints().length);
diff --git a/deps/v8/test/mjsunit/debug-stepin-construct-call.js b/deps/v8/test/mjsunit/debug-stepin-construct-call.js
new file mode 100644
index 0000000000..5e2145591f
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-stepin-construct-call.js
@@ -0,0 +1,42 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var break_count = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var source_line = exec_state.frame(0).sourceLineText();
+ print(source_line);
+ exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ break_count++;
+ } catch (e) {
+ exception = e;
+ }
+}
+
+var Debug = debug.Debug;
+Debug.setListener(listener);
+
+
+function f() {
+ this.x = 1;
+}
+
+function g() {
+ new f();
+}
+
+Debug.setBreakPoint(g, 6, Debug.BreakPositionAlignment.BreakPosition);
+print(Debug.showBreakPoints(g, undefined,
+ Debug.BreakPositionAlignment.BreakPosition));
+
+g();
+Debug.setListener(null);
+
+assertEquals(6, break_count);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/element-accessor.js b/deps/v8/test/mjsunit/element-accessor.js
new file mode 100644
index 0000000000..452afc8d16
--- /dev/null
+++ b/deps/v8/test/mjsunit/element-accessor.js
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function () {
+ var o = [];
+ o.__proto__ = {};
+
+ function store(o, i, v) {
+ o[i] = v;
+ }
+
+ store(o, 0, 0);
+ store(o, 1, 0);
+ store(o, 2, 0);
+ o.__proto__[10000000] = 1;
+
+ var set = 0;
+
+ Object.defineProperty(o, "3", {
+ get:function() { return 100; },
+ set:function(v) { set = v; }});
+
+ store(o, 3, 1000);
+ assertEquals(1000, set);
+ assertEquals(100, o[3]);
+})();
+
+(function () {
+ var o = new Int32Array();
+ Object.defineProperty(o, "0", {get: function(){}});
+ assertEquals(undefined, Object.getOwnPropertyDescriptor(o, "0"));
+})();
diff --git a/deps/v8/test/mjsunit/element-read-only.js b/deps/v8/test/mjsunit/element-read-only.js
new file mode 100644
index 0000000000..9ec027f6cc
--- /dev/null
+++ b/deps/v8/test/mjsunit/element-read-only.js
@@ -0,0 +1,154 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(a, b, c, d) { return arguments; }
+
+// Ensure non-configurable argument elements stay non-configurable.
+(function () {
+ var args = f(1);
+ Object.defineProperty(args, "0", {value: 10, configurable: false});
+ assertFalse(Object.getOwnPropertyDescriptor(args, "0").configurable);
+ for (var i = 0; i < 10; i++) {
+ args[i] = 1;
+ }
+ assertFalse(Object.getOwnPropertyDescriptor(args, "0").configurable);
+})();
+
+// Ensure read-only properties on the prototype chain cause TypeError.
+
+// Newly added.
+(function () {
+ var o = [];
+ var proto = {};
+ var index = 3;
+ function store(o, i, v) { "use strict"; o[i] = v; };
+ o.__proto__ = proto;
+ for (var i = 0; i < index; i++) {
+ store(o, i, 0);
+ }
+ Object.defineProperty(proto, index, {value: 100, writable: false});
+ assertThrows(function() { store(o, index, 0); });
+ assertEquals(100, o[index]);
+})();
+
+// Reconfigured.
+(function () {
+ var o = [];
+ var proto = {3: 10000};
+ var index = 3;
+ function store(o, i, v) { "use strict"; o[i] = v; };
+ o.__proto__ = proto;
+ for (var i = 0; i < index; i++) {
+ store(o, i, 0);
+ }
+ Object.defineProperty(proto, index, {value: 100, writable: false});
+ assertThrows(function() { store(o, index, 0); });
+ assertEquals(100, o[index]);
+})();
+
+// Newly added to arguments object.
+(function () {
+ var o = [];
+ var proto = f(100);
+ var index = 3;
+ function store(o, i, v) { "use strict"; o[i] = v; };
+ o.__proto__ = proto;
+ for (var i = 0; i < index; i++) {
+ store(o, i, 0);
+ }
+ Object.defineProperty(proto, index, {value: 100, writable: false});
+ assertThrows(function() { store(o, index, 0); });
+ assertEquals(100, o[index]);
+})();
+
+// Reconfigured on to arguments object.
+(function () {
+ var o = [];
+ var proto = f(100, 200, 300, 400);
+ var index = 3;
+ function store(o, i, v) { "use strict"; o[i] = v; };
+ o.__proto__ = proto;
+ for (var i = 0; i < index; i++) {
+ store(o, i, 0);
+ }
+ Object.defineProperty(proto, index, {value: 100, writable: false});
+ assertThrows(function() { store(o, index, 0); });
+ assertEquals(100, o[index]);
+})();
+
+// Extensions prevented object.
+(function () {
+ var o = [];
+ var proto = [0, 1, 2, 3];
+ var index = 3;
+ function store(o, i, v) { "use strict"; o[i] = v; };
+ o.__proto__ = proto;
+ for (var i = 0; i < index; i++) {
+ store(o, i, 0);
+ }
+ Object.preventExtensions(proto);
+ Object.defineProperty(proto, index, {value: 100, writable: false});
+ assertThrows(function() { store(o, index, 0); });
+ assertEquals(100, o[index]);
+})();
+
+// Extensions prevented arguments object.
+(function () {
+ var o = [];
+ var proto = f(100, 200, 300, 400);
+ var index = 3;
+ function store(o, i, v) { "use strict"; o[i] = v; };
+ o.__proto__ = proto;
+ for (var i = 0; i < index; i++) {
+ store(o, i, 0);
+ }
+ Object.preventExtensions(proto);
+ Object.defineProperty(proto, index, {value: 100, writable: false});
+ assertThrows(function() { store(o, index, 0); });
+ assertEquals(100, o[index]);
+})();
+
+// Array with large index.
+(function () {
+ var o = [];
+ var proto = [];
+ var index = 3;
+ function store(o, i, v) { "use strict"; o[i] = v; };
+ o.__proto__ = proto;
+ for (var i = 0; i < index; i++) {
+ store(o, i, 0);
+ }
+ proto[1 << 30] = 1;
+ Object.defineProperty(proto, index, {value: 100, writable: false});
+ assertThrows(function() { store(o, index, 0); });
+ assertEquals(100, o[index]);
+})();
+
+// Frozen object.
+(function () {
+ var o = [];
+ var proto = [0, 1, 2, 3];
+ function store(o, i, v) { "use strict"; o[i] = v; };
+ o.__proto__ = proto;
+ for (var i = 0; i < 3; i++) {
+ store(o, i, 0);
+ }
+ Object.freeze(proto);
+ assertThrows(function() { store(o, 3, 0); });
+ assertEquals(3, o[3]);
+})();
+
+// Frozen arguments object.
+(function () {
+ var o = [];
+ var proto = f(0, 1, 2, 3);
+ function store(o, i, v) { "use strict"; o[i] = v; };
+ o.__proto__ = proto;
+ for (var i = 0; i < 3; i++) {
+ store(o, i, 0);
+ }
+ Object.freeze(proto);
+ assertThrows(function() { store(o, 3, 0); });
+ assertEquals(3, o[3]);
+})();
diff --git a/deps/v8/test/mjsunit/elements-kind.js b/deps/v8/test/mjsunit/elements-kind.js
index cb2d178a7e..4da8a9dc60 100644
--- a/deps/v8/test/mjsunit/elements-kind.js
+++ b/deps/v8/test/mjsunit/elements-kind.js
@@ -32,15 +32,6 @@ var elements_kind = {
fast : 'fast elements',
fast_double : 'fast double elements',
dictionary : 'dictionary elements',
- external_int32 : 'external int8 elements',
- external_uint8 : 'external uint8 elements',
- external_int16 : 'external int16 elements',
- external_uint16 : 'external uint16 elements',
- external_int32 : 'external int32 elements',
- external_uint32 : 'external uint32 elements',
- external_float32 : 'external float32 elements',
- external_float64 : 'external float64 elements',
- external_uint8_clamped : 'external uint8_clamped elements',
fixed_int32 : 'fixed int8 elements',
fixed_uint8 : 'fixed uint8 elements',
fixed_int16 : 'fixed int16 elements',
@@ -58,34 +49,6 @@ function getKind(obj) {
if (%HasFastDoubleElements(obj)) return elements_kind.fast_double;
if (%HasDictionaryElements(obj)) return elements_kind.dictionary;
- // Every external kind is also an external array.
- if (%HasExternalInt8Elements(obj)) {
- return elements_kind.external_int8;
- }
- if (%HasExternalUint8Elements(obj)) {
- return elements_kind.external_uint8;
- }
- if (%HasExternalInt16Elements(obj)) {
- return elements_kind.external_int16;
- }
- if (%HasExternalUint16Elements(obj)) {
- return elements_kind.external_uint16;
- }
- if (%HasExternalInt32Elements(obj)) {
- return elements_kind.external_int32;
- }
- if (%HasExternalUint32Elements(obj)) {
- return elements_kind.external_uint32;
- }
- if (%HasExternalFloat32Elements(obj)) {
- return elements_kind.external_float32;
- }
- if (%HasExternalFloat64Elements(obj)) {
- return elements_kind.external_float64;
- }
- if (%HasExternalUint8ClampedElements(obj)) {
- return elements_kind.external_uint8_clamped;
- }
if (%HasFixedInt8Elements(obj)) {
return elements_kind.fixed_int8;
}
@@ -164,15 +127,15 @@ function test_wrapper() {
assertKind(elements_kind.fixed_uint8_clamped, new Uint8ClampedArray(512));
var ab = new ArrayBuffer(128);
- assertKind(elements_kind.external_int8, new Int8Array(ab));
- assertKind(elements_kind.external_uint8, new Uint8Array(ab));
- assertKind(elements_kind.external_int16, new Int16Array(ab));
- assertKind(elements_kind.external_uint16, new Uint16Array(ab));
- assertKind(elements_kind.external_int32, new Int32Array(ab));
- assertKind(elements_kind.external_uint32, new Uint32Array(ab));
- assertKind(elements_kind.external_float32, new Float32Array(ab));
- assertKind(elements_kind.external_float64, new Float64Array(ab));
- assertKind(elements_kind.external_uint8_clamped, new Uint8ClampedArray(ab));
+ assertKind(elements_kind.fixed_int8, new Int8Array(ab));
+ assertKind(elements_kind.fixed_uint8, new Uint8Array(ab));
+ assertKind(elements_kind.fixed_int16, new Int16Array(ab));
+ assertKind(elements_kind.fixed_uint16, new Uint16Array(ab));
+ assertKind(elements_kind.fixed_int32, new Int32Array(ab));
+ assertKind(elements_kind.fixed_uint32, new Uint32Array(ab));
+ assertKind(elements_kind.fixed_float32, new Float32Array(ab));
+ assertKind(elements_kind.fixed_float64, new Float64Array(ab));
+ assertKind(elements_kind.fixed_uint8_clamped, new Uint8ClampedArray(ab));
// Crankshaft support for smi-only array elements.
function monomorphic(array) {
diff --git a/deps/v8/test/mjsunit/error-constructors.js b/deps/v8/test/mjsunit/error-constructors.js
index 84c6bbfd0c..1ada39de55 100644
--- a/deps/v8/test/mjsunit/error-constructors.js
+++ b/deps/v8/test/mjsunit/error-constructors.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
-
// Check that message and name are not enumerable on Error objects.
var desc = Object.getOwnPropertyDescriptor(Error.prototype, 'name');
assertFalse(desc['enumerable']);
@@ -62,33 +60,65 @@ var e = new ReferenceError('123');
assertTrue(e.hasOwnProperty('message'));
assertTrue(e.hasOwnProperty('stack'));
-var e = %MakeReferenceError("my_test_error", [0, 1]);
+try {
+ eval("var error = reference");
+} catch (error) {
+ e = error;
+}
+
assertTrue(e.hasOwnProperty('stack'));
// Check that intercepting property access from toString is prevented for
// compiler errors. This is not specified, but allowing interception
// through a getter can leak error objects from different
// script tags in the same context in a browser setting.
-var errors = [SyntaxError, ReferenceError, TypeError];
+var errors = [SyntaxError, ReferenceError, TypeError, RangeError, URIError];
+var error_triggers = ["syntax error",
+ "var error = reference",
+ "undefined()",
+ "String.fromCodePoint(0xFFFFFF)",
+ "decodeURI('%F')"];
for (var i in errors) {
- var name = errors[i].prototype.toString();
+ var name = errors[i].name;
+
// Monkey-patch prototype.
var props = ["name", "message", "stack"];
for (var j in props) {
errors[i].prototype.__defineGetter__(props[j], fail);
}
// String conversion should not invoke monkey-patched getters on prototype.
- var e = new errors[i];
- assertEquals(name, e.toString());
+ var error;
+ try {
+ eval(error_triggers[i]);
+ } catch (e) {
+ error = e;
+ }
+ assertTrue(error.toString().startsWith(name));
+
+ // Deleting message on the error (exposing the getter) is fine.
+ delete error.message;
+ assertEquals(name, error.toString());
+
+ // Custom properties shadowing the name are fine.
+ var myerror = { name: "myerror", message: "mymessage"};
+ myerror.__proto__ = error;
+ assertEquals("myerror: mymessage", myerror.toString());
+
// Custom getters in actual objects are welcome.
- e.__defineGetter__("name", function() { return "mine"; });
- assertEquals("mine", e.toString());
+ error.__defineGetter__("name", function() { return "mine"; });
+ assertEquals("mine", error.toString());
+
+ // Custom properties shadowing the name are fine.
+ var myerror2 = { message: "mymessage"};
+ myerror2.__proto__ = error;
+ assertEquals("mine: mymessage", myerror2.toString());
}
-// Monkey-patching non-static errors should still be observable.
+// Monkey-patching non-internal errors should still be observable.
function MyError() {}
MyError.prototype = new Error;
-var errors = [Error, RangeError, EvalError, URIError, MyError];
+var errors = [Error, RangeError, EvalError, URIError,
+ SyntaxError, ReferenceError, TypeError, MyError];
for (var i in errors) {
errors[i].prototype.__defineGetter__("name", function() { return "my"; });
errors[i].prototype.__defineGetter__("message", function() { return "moo"; });
diff --git a/deps/v8/test/mjsunit/es6/array-iterator.js b/deps/v8/test/mjsunit/es6/array-iterator.js
index 767991eafe..5fab0fbf86 100644
--- a/deps/v8/test/mjsunit/es6/array-iterator.js
+++ b/deps/v8/test/mjsunit/es6/array-iterator.js
@@ -47,6 +47,9 @@ function TestArrayPrototype() {
assertHasOwnProperty(Array.prototype, 'entries', DONT_ENUM);
assertHasOwnProperty(Array.prototype, 'keys', DONT_ENUM);
assertHasOwnProperty(Array.prototype, Symbol.iterator, DONT_ENUM);
+ assertEquals('entries', Array.prototype.entries.name);
+ assertEquals('keys', Array.prototype.keys.name);
+ assertEquals('values', Array.prototype[Symbol.iterator].name);
}
TestArrayPrototype();
diff --git a/deps/v8/test/mjsunit/es6/array-reverse-order.js b/deps/v8/test/mjsunit/es6/array-reverse-order.js
new file mode 100644
index 0000000000..590491cb68
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/array-reverse-order.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ES6 specifically says that elements should be checked with [[HasElement]] before
+// [[Get]]. This is observable in case a getter deletes elements. ES5 put the
+// [[HasElement]] after the [[Get]].
+
+assertTrue(1 in Array.prototype.reverse.call(
+ {length:2, get 0(){delete this[0];}, 1: "b"}))
diff --git a/deps/v8/test/mjsunit/es6/block-conflicts.js b/deps/v8/test/mjsunit/es6/block-conflicts.js
index fdd581dd70..0e3d4e5a2a 100644
--- a/deps/v8/test/mjsunit/es6/block-conflicts.js
+++ b/deps/v8/test/mjsunit/es6/block-conflicts.js
@@ -79,7 +79,11 @@ var letbinds = [ "let x;",
"const x = function() {};",
"const x = 2, y = 3;",
"const y = 4, x = 5;",
+ "class x { }",
];
+function forCompatible(bind) {
+ return !bind.startsWith('class');
+}
var varbinds = [ "var x;",
"var x = 0;",
"var x = undefined;",
@@ -101,7 +105,9 @@ for (var l = 0; l < letbinds.length; ++l) {
TestNoConflict(varbinds[v] + '{' + letbinds[l] + '}');
TestNoConflict('{' + letbinds[l] + '}' + varbinds[v]);
// For loop.
- TestConflict('for (' + letbinds[l] + '0;) {' + varbinds[v] + '}');
+ if (forCompatible(letbinds[l])) {
+ TestConflict('for (' + letbinds[l] + '0;) {' + varbinds[v] + '}');
+ }
TestNoConflict('for (' + varbinds[v] + '0;) {' + letbinds[l] + '}');
}
@@ -114,8 +120,12 @@ for (var l = 0; l < letbinds.length; ++l) {
TestNoConflict(letbinds[l] + '{ ' + letbinds[k] + '}');
TestNoConflict('{' + letbinds[k] +'} ' + letbinds[l]);
// For loop.
- TestNoConflict('for (' + letbinds[l] + '0;) {' + letbinds[k] + '}');
- TestNoConflict('for (' + letbinds[k] + '0;) {' + letbinds[l] + '}');
+ if (forCompatible(letbinds[l])) {
+ TestNoConflict('for (' + letbinds[l] + '0;) {' + letbinds[k] + '}');
+ }
+ if (forCompatible(letbinds[k])) {
+ TestNoConflict('for (' + letbinds[k] + '0;) {' + letbinds[l] + '}');
+ }
}
// Test conflicting function/let bindings.
@@ -128,7 +138,9 @@ for (var l = 0; l < letbinds.length; ++l) {
TestNoConflict(funbind + '{' + letbinds[l] + '}');
TestNoConflict('{' + letbinds[l] + '}' + funbind);
// For loop.
- TestNoConflict('for (' + letbinds[l] + '0;) {' + funbind + '}');
+ if (forCompatible(letbinds[l])) {
+ TestNoConflict('for (' + letbinds[l] + '0;) {' + funbind + '}');
+ }
// Test conflicting parameter/let bindings.
TestConflict('(function(x) {' + letbinds[l] + '})();');
diff --git a/deps/v8/test/mjsunit/es6/block-const-assign.js b/deps/v8/test/mjsunit/es6/block-const-assign.js
index f78faa689d..541dc0d97b 100644
--- a/deps/v8/test/mjsunit/es6/block-const-assign.js
+++ b/deps/v8/test/mjsunit/es6/block-const-assign.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-computed-property-names
-
// Test that we throw early syntax errors in harmony mode
// when using an immutable binding in an assigment or with
// prefix/postfix decrement/increment operators.
diff --git a/deps/v8/test/mjsunit/es6/block-leave.js b/deps/v8/test/mjsunit/es6/block-leave.js
index 338631b76e..4c63b77319 100644
--- a/deps/v8/test/mjsunit/es6/block-leave.js
+++ b/deps/v8/test/mjsunit/es6/block-leave.js
@@ -175,7 +175,7 @@ try {
// Verify that the context is correctly set in the stack frame after exiting
-// from with.
+// from eval.
function f() {}
(function(x) {
diff --git a/deps/v8/test/mjsunit/es6/block-let-declaration.js b/deps/v8/test/mjsunit/es6/block-let-declaration.js
index 5fbb12824b..a138144d18 100644
--- a/deps/v8/test/mjsunit/es6/block-let-declaration.js
+++ b/deps/v8/test/mjsunit/es6/block-let-declaration.js
@@ -33,17 +33,20 @@
let x;
let y = 2;
const z = 4;
+class c { static foo() { return 1; } }
// Block local
{
let y;
let x = 3;
const z = 5;
+ class c { static foo() { return 2; } }
}
assertEquals(undefined, x);
assertEquals(2,y);
assertEquals(4,z);
+assertEquals(1, c.foo());
if (true) {
let y;
@@ -106,6 +109,16 @@ TestLocalDoesNotThrow("for (;false;) var x;");
TestLocalDoesNotThrow("switch (true) { case true: var x; }");
TestLocalDoesNotThrow("switch (true) { default: var x; }");
+// Test class declarations with initialisers in statement positions.
+TestLocalThrows("if (true) class x { };", SyntaxError);
+TestLocalThrows("if (true) {} else class x { };", SyntaxError);
+TestLocalThrows("do class x { }; while (false)", SyntaxError);
+TestLocalThrows("while (false) class x { };", SyntaxError);
+TestLocalThrows("label: class x { };", SyntaxError);
+TestLocalThrows("for (;false;) class x { };", SyntaxError);
+TestLocalDoesNotThrow("switch (true) { case true: class x { }; }");
+TestLocalDoesNotThrow("switch (true) { default: class x { }; }");
+
// Test that redeclarations of functions are only allowed in outermost scope.
TestLocalThrows("{ let f; var f; }");
TestLocalThrows("{ var f; let f; }");
@@ -113,9 +126,13 @@ TestLocalThrows("{ function f() {} let f; }");
TestLocalThrows("{ let f; function f() {} }");
TestLocalThrows("{ function f() {} var f; }");
TestLocalThrows("{ var f; function f() {} }");
+TestLocalThrows("{ function f() {} class f {} }");
+TestLocalThrows("{ class f {}; function f() {} }");
TestLocalThrows("{ function f() {} function f() {} }");
TestLocalThrows("function f() {} let f;");
TestLocalThrows("let f; function f() {}");
+TestLocalThrows("function f() {} class f {}");
+TestLocalThrows("class f {}; function f() {}");
TestLocalDoesNotThrow("function arg() {}");
TestLocalDoesNotThrow("function f() {} var f;");
TestLocalDoesNotThrow("var f; function f() {}");
diff --git a/deps/v8/test/mjsunit/es6/block-let-semantics.js b/deps/v8/test/mjsunit/es6/block-let-semantics.js
index b0a826a007..59eec1ceea 100644
--- a/deps/v8/test/mjsunit/es6/block-let-semantics.js
+++ b/deps/v8/test/mjsunit/es6/block-let-semantics.js
@@ -70,6 +70,7 @@ TestAll('x += 1; let x;');
TestAll('++x; let x;');
TestAll('x++; let x;');
TestAll('let y = x; const x = 1;');
+TestAll('let y = x; class x {}');
TestAll('f(); let x; function f() { return x + 1; }');
TestAll('f(); let x; function f() { x = 1; }');
@@ -77,6 +78,7 @@ TestAll('f(); let x; function f() { x += 1; }');
TestAll('f(); let x; function f() { ++x; }');
TestAll('f(); let x; function f() { x++; }');
TestAll('f(); const x = 1; function f() { return x; }');
+TestAll('f(); class x { }; function f() { return x; }');
TestAll('f()(); let x; function f() { return function() { return x + 1; } }');
TestAll('f()(); let x; function f() { return function() { x = 1; } }');
@@ -84,21 +86,24 @@ TestAll('f()(); let x; function f() { return function() { x += 1; } }');
TestAll('f()(); let x; function f() { return function() { ++x; } }');
TestAll('f()(); let x; function f() { return function() { x++; } }');
TestAll('f()(); const x = 1; function f() { return function() { return x; } }');
-
-// Use before initialization with a dynamic lookup.
-TestAll('eval("x + 1;"); let x;');
-TestAll('eval("x = 1;"); let x;');
-TestAll('eval("x += 1;"); let x;');
-TestAll('eval("++x;"); let x;');
-TestAll('eval("x++;"); let x;');
-TestAll('eval("x"); const x = 1;');
-
-// Use before initialization with check for eval-shadowed bindings.
-TestAll('function f() { eval("var y = 2;"); x + 1; }; f(); let x;');
-TestAll('function f() { eval("var y = 2;"); x = 1; }; f(); let x;');
-TestAll('function f() { eval("var y = 2;"); x += 1; }; f(); let x;');
-TestAll('function f() { eval("var y = 2;"); ++x; }; f(); let x;');
-TestAll('function f() { eval("var y = 2;"); x++; }; f(); let x;');
+TestAll('f()(); class x { }; function f() { return function() { return x; } }');
+
+for (var kw of ['let x = 2', 'const x = 2', 'class x { }']) {
+ // Use before initialization with a dynamic lookup.
+ TestAll(`eval("x"); ${kw};`);
+ TestAll(`eval("x + 1;"); ${kw};`);
+ TestAll(`eval("x = 1;"); ${kw};`);
+ TestAll(`eval("x += 1;"); ${kw};`);
+ TestAll(`eval("++x;"); ${kw};`);
+ TestAll(`eval("x++;"); ${kw};`);
+
+ // Use before initialization with check for eval-shadowed bindings.
+ TestAll(`function f() { eval("var y = 2;"); x + 1; }; f(); ${kw};`);
+ TestAll(`function f() { eval("var y = 2;"); x = 1; }; f(); ${kw};`);
+ TestAll(`function f() { eval("var y = 2;"); x += 1; }; f(); ${kw};`);
+ TestAll(`function f() { eval("var y = 2;"); ++x; }; f(); ${kw};`);
+ TestAll(`function f() { eval("var y = 2;"); x++; }; f(); ${kw};`);
+}
// Test that variables introduced by function declarations are created and
// initialized upon entering a function / block scope.
diff --git a/deps/v8/test/mjsunit/es6/block-scoping.js b/deps/v8/test/mjsunit/es6/block-scoping.js
index 5f481b8bf2..719f5231ce 100644
--- a/deps/v8/test/mjsunit/es6/block-scoping.js
+++ b/deps/v8/test/mjsunit/es6/block-scoping.js
@@ -49,15 +49,19 @@ function f2(one) {
var x = one + 1;
let y = one + 2;
const u = one + 4;
+ class a { static foo() { return one + 6; } }
{
let z = one + 3;
const v = one + 5;
+ class b { static foo() { return one + 7; } }
assertEquals(1, eval('one'));
assertEquals(2, eval('x'));
assertEquals(3, eval('y'));
assertEquals(4, eval('z'));
assertEquals(5, eval('u'));
assertEquals(6, eval('v'));
+ assertEquals(7, eval('a.foo()'));
+ assertEquals(8, eval('b.foo()'));
}
}
@@ -68,15 +72,19 @@ function f3(one) {
var x = one + 1;
let y = one + 2;
const u = one + 4;
+ class a { static foo() { return one + 6; } }
{
let z = one + 3;
const v = one + 5;
+ class b { static foo() { return one + 7; } }
assertEquals(1, one);
assertEquals(2, x);
assertEquals(3, y);
assertEquals(4, z);
assertEquals(5, u);
assertEquals(6, v);
+ assertEquals(7, a.foo());
+ assertEquals(8, b.foo());
}
}
for (var j = 0; j < 5; ++j) f3(1);
@@ -91,9 +99,11 @@ function f4(one) {
var x = one + 1;
let y = one + 2;
const u = one + 4;
+ class a { static foo() { return one + 6; } }
{
let z = one + 3;
const v = one + 5;
+ class b { static foo() { return one + 7; } }
function f() {
assertEquals(1, eval('one'));
assertEquals(2, eval('x'));
@@ -101,6 +111,8 @@ function f4(one) {
assertEquals(4, eval('z'));
assertEquals(5, eval('u'));
assertEquals(6, eval('v'));
+ assertEquals(7, eval('a.foo()'));
+ assertEquals(8, eval('b.foo()'));
}
f();
}
@@ -113,9 +125,11 @@ function f5(one) {
var x = one + 1;
let y = one + 2;
const u = one + 4;
+ class a { static foo() { return one + 6; } }
{
let z = one + 3;
const v = one + 5;
+ class b { static foo() { return one + 7; } }
function f() {
assertEquals(1, one);
assertEquals(2, x);
@@ -123,6 +137,8 @@ function f5(one) {
assertEquals(4, z);
assertEquals(5, u);
assertEquals(6, v);
+ assertEquals(7, a.foo());
+ assertEquals(8, b.foo());
}
f();
}
@@ -149,25 +165,43 @@ function f7(a) {
var c = 1;
var d = 1;
const e = 1;
- { // let variables shadowing argument, let, const and var variables
+ class f { static foo() { return 1; } }
+ { // let variables shadowing argument, let, const, class and var variables
let a = 2;
let b = 2;
let c = 2;
let e = 2;
+ let f = 2;
assertEquals(2,a);
assertEquals(2,b);
assertEquals(2,c);
assertEquals(2,e);
+ assertEquals(2,f);
}
{ // const variables shadowing argument, let, const and var variables
const a = 2;
const b = 2;
const c = 2;
const e = 2;
+ const f = 2;
assertEquals(2,a);
assertEquals(2,b);
assertEquals(2,c);
assertEquals(2,e);
+ assertEquals(2,f);
+ }
+ { // class variables shadowing argument, let, const and var variables
+ class a { static foo() { return 2; } }
+ class b { static foo() { return 2; } }
+ class c { static foo() { return 2; } }
+ class d { static foo() { return 2; } }
+ class e { static foo() { return 2; } }
+ class f { static foo() { return 2; } }
+ assertEquals(2,a.foo());
+ assertEquals(2,b.foo());
+ assertEquals(2,c.foo());
+ assertEquals(2,e.foo());
+ assertEquals(2,f.foo());
}
try {
throw 'stuff1';
@@ -225,16 +259,18 @@ function f7(a) {
c = 2;
}
assertEquals(1,c);
- (function(a,b,c,e) {
- // arguments shadowing argument, let, const and var variable
+ (function(a,b,c,e,f) {
+ // arguments shadowing argument, let, const, class and var variable
a = 2;
b = 2;
c = 2;
e = 2;
+ f = 2;
assertEquals(2,a);
assertEquals(2,b);
assertEquals(2,c);
assertEquals(2,e);
+ assertEquals(2,f);
// var variable shadowing var variable
var d = 2;
})(1,1);
@@ -243,6 +279,7 @@ function f7(a) {
assertEquals(1,c);
assertEquals(1,d);
assertEquals(1,e);
+ assertEquals(1,f.foo());
}
f7(1);
@@ -253,19 +290,23 @@ function f8() {
var let_accessors = [];
var var_accessors = [];
var const_accessors = [];
+ var class_accessors = [];
for (var i = 0; i < 10; i++) {
let x = i;
var y = i;
const z = i;
+ class a { static foo() { return x; } }
let_accessors[i] = function() { return x; }
var_accessors[i] = function() { return y; }
const_accessors[i] = function() { return z; }
+ class_accessors[i] = function() { return a; }
}
for (var j = 0; j < 10; j++) {
y = j + 10;
assertEquals(j, let_accessors[j]());
assertEquals(y, var_accessors[j]());
assertEquals(j, const_accessors[j]());
+ assertEquals(j, class_accessors[j]().foo());
}
}
f8();
diff --git a/deps/v8/test/mjsunit/harmony/class-computed-property-names-super.js b/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js
index a68b53c18f..cb9f25157c 100644
--- a/deps/v8/test/mjsunit/harmony/class-computed-property-names-super.js
+++ b/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-computed-property-names --harmony-sloppy
+// Flags: --harmony-sloppy
// Flags: --allow-natives-syntax
diff --git a/deps/v8/test/mjsunit/harmony/computed-property-names-classes.js b/deps/v8/test/mjsunit/es6/computed-property-names-classes.js
index 46a9e9ec2d..eebf99aef5 100644
--- a/deps/v8/test/mjsunit/harmony/computed-property-names-classes.js
+++ b/deps/v8/test/mjsunit/es6/computed-property-names-classes.js
@@ -4,8 +4,6 @@
'use strict';
-// Flags: --harmony-computed-property-names
-
function ID(x) {
return x;
diff --git a/deps/v8/test/mjsunit/harmony/computed-property-names-deopt.js b/deps/v8/test/mjsunit/es6/computed-property-names-deopt.js
index 1f0b0585fc..2f3a597f11 100644
--- a/deps/v8/test/mjsunit/harmony/computed-property-names-deopt.js
+++ b/deps/v8/test/mjsunit/es6/computed-property-names-deopt.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-computed-property-names --allow-natives-syntax
+// Flags: --allow-natives-syntax
(function TestProtoDeopt() {
diff --git a/deps/v8/test/mjsunit/harmony/computed-property-names-object-literals-methods.js b/deps/v8/test/mjsunit/es6/computed-property-names-object-literals-methods.js
index 7ba15aca92..a5f380ceac 100644
--- a/deps/v8/test/mjsunit/harmony/computed-property-names-object-literals-methods.js
+++ b/deps/v8/test/mjsunit/es6/computed-property-names-object-literals-methods.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-computed-property-names
-
function ID(x) {
return x;
diff --git a/deps/v8/test/mjsunit/harmony/computed-property-names-super.js b/deps/v8/test/mjsunit/es6/computed-property-names-super.js
index bfc31c668f..40b0eab942 100644
--- a/deps/v8/test/mjsunit/harmony/computed-property-names-super.js
+++ b/deps/v8/test/mjsunit/es6/computed-property-names-super.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-computed-property-names --allow-natives-syntax
+// Flags: --allow-natives-syntax
function ID(x) {
diff --git a/deps/v8/test/mjsunit/harmony/computed-property-names.js b/deps/v8/test/mjsunit/es6/computed-property-names.js
index a559159380..d75278cfe3 100644
--- a/deps/v8/test/mjsunit/harmony/computed-property-names.js
+++ b/deps/v8/test/mjsunit/es6/computed-property-names.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-computed-property-names
-
function ID(x) {
return x;
diff --git a/deps/v8/test/mjsunit/es6/debug-blockscopes.js b/deps/v8/test/mjsunit/es6/debug-blockscopes.js
index 31208d41f4..3f890ebd54 100644
--- a/deps/v8/test/mjsunit/es6/debug-blockscopes.js
+++ b/deps/v8/test/mjsunit/es6/debug-blockscopes.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --allow-natives-syntax
// The functions used for testing backtraces. They are at the top to make the
// testing of source line/column easier.
@@ -187,6 +187,14 @@ function CheckScopeContent(content, number, exec_state) {
}
+function assertEqualsUnlessOptimized(expected, value, f) {
+ try {
+ assertEquals(expected, value);
+ } catch (e) {
+ assertOptimized(f);
+ }
+}
+
// Simple empty block scope in local scope.
BeginTest("Local block 1");
@@ -517,11 +525,11 @@ function shadowing_1() {
{
let i = 5;
debugger;
- assertEquals(27, i);
+ assertEqualsUnlessOptimized(27, i, shadowing_1);
}
assertEquals(0, i);
debugger;
- assertEquals(27, i);
+ assertEqualsUnlessOptimized(27, i, shadowing_1);
}
listener_delegate = function (exec_state) {
@@ -538,9 +546,9 @@ function shadowing_2() {
{
let j = 5;
debugger;
- assertEquals(27, j);
+ assertEqualsUnlessOptimized(27, j, shadowing_2);
}
- assertEquals(0, i);
+ assertEqualsUnlessOptimized(0, i, shadowing_2);
}
listener_delegate = function (exec_state) {
diff --git a/deps/v8/test/mjsunit/es6/debug-function-scopes.js b/deps/v8/test/mjsunit/es6/debug-function-scopes.js
index e7049ee3c0..c1a20e7b9e 100644
--- a/deps/v8/test/mjsunit/es6/debug-function-scopes.js
+++ b/deps/v8/test/mjsunit/es6/debug-function-scopes.js
@@ -45,7 +45,7 @@ function CheckScope(scope_mirror, scope_expectations, expected_scope_type) {
}
}
-// A copy of the scope types from mirror-debugger.js.
+// A copy of the scope types from debug/mirrors.js.
var ScopeType = { Global: 0,
Local: 1,
With: 2,
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/events.js b/deps/v8/test/mjsunit/es6/debug-promises/events.js
index a9f94543f4..3fcb22ff27 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/events.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/events.js
@@ -116,9 +116,7 @@ function testDone(iteration) {
}
var iteration = iteration || 0;
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone();
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js
index 0fca57730a..fd4770ebee 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js
@@ -63,10 +63,7 @@ function testDone(iteration) {
}
}
- // Run testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js
index 63151df016..b7c5861c1f 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js
@@ -77,10 +77,7 @@ function testDone(iteration) {
}
}
- // Run testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
index beaf1878fe..0b0c0c8e38 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
@@ -60,10 +60,7 @@ function testDone(iteration) {
}
}
- // Run testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-late.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-late.js
index 4a883da13a..db58790e39 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-late.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-late.js
@@ -67,10 +67,7 @@ function testDone(iteration) {
}
}
- // Run testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
index 86e2a815e7..ac23b48b6f 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
@@ -60,10 +60,7 @@ function testDone(iteration) {
}
}
- // Run testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js
index fc6233da8d..fa263458c4 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js
@@ -66,10 +66,7 @@ function testDone(iteration) {
}
}
- // Run testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js
index 15e464ec60..6b7dc1a77c 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js
@@ -78,10 +78,7 @@ function testDone(iteration) {
}
}
- // Run testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js
index d11c01ff73..4c57cf0237 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js
@@ -66,10 +66,7 @@ function testDone(iteration) {
}
}
- // Run testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js
index 2fbf05141d..bd6d343f82 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js
@@ -62,10 +62,7 @@ function testDone(iteration) {
}
}
- // Run testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js
index 36b5565e5f..3c30ad3f7c 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js
@@ -78,10 +78,7 @@ function testDone(iteration) {
}
}
- // Run testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js
index 72f800bf5b..c4bc6c44e3 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js
@@ -61,10 +61,7 @@ function testDone(iteration) {
}
}
- // Rerun testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js
index 69aa8ebbd2..ba82a1f8cb 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js
@@ -61,10 +61,7 @@ function testDone(iteration) {
}
}
- // Run testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
index 1ea1c7f9ff..bd39a155cc 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
@@ -81,10 +81,7 @@ function testDone(iteration) {
}
}
- // Run testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js
index 94dcdffa22..c88feb9c39 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js
@@ -79,10 +79,7 @@ function testDone(iteration) {
}
}
- // Run testDone through the Object.observe processing loop.
- var dummy = {};
- Object.observe(dummy, checkResult);
- dummy.dummy = dummy;
+ %EnqueueMicrotask(checkResult);
}
testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js b/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js
index 8dbdb3457a..98510ff52b 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-object-observe
// Flags: --allow-natives-syntax --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/es6/microtask-delivery.js b/deps/v8/test/mjsunit/es6/microtask-delivery.js
index f74385e635..01b971ddc0 100644
--- a/deps/v8/test/mjsunit/es6/microtask-delivery.js
+++ b/deps/v8/test/mjsunit/es6/microtask-delivery.js
@@ -25,6 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --harmony-object-observe
// Flags: --allow-natives-syntax
var ordering = [];
diff --git a/deps/v8/test/mjsunit/es6/promises.js b/deps/v8/test/mjsunit/es6/promises.js
index 63b6d2f94a..19239b601b 100644
--- a/deps/v8/test/mjsunit/es6/promises.js
+++ b/deps/v8/test/mjsunit/es6/promises.js
@@ -29,7 +29,6 @@
// Make sure we don't rely on functions patchable by monkeys.
var call = Function.prototype.call.call.bind(Function.prototype.call)
-var observe = Object.observe;
var getOwnPropertyNames = Object.getOwnPropertyNames;
var defineProperty = Object.defineProperty;
var numberPrototype = Number.prototype;
@@ -87,19 +86,15 @@ function assertAsync(b, s) {
}
function assertAsyncDone(iteration) {
- var iteration = iteration || 0
- var dummy = {}
- observe(dummy,
- function() {
- if (asyncAssertsExpected === 0)
- assertAsync(true, "all")
- else if (iteration > 10) // Shouldn't take more.
- assertAsync(false, "all")
- else
- assertAsyncDone(iteration + 1)
- }
- )
- dummy.dummy = dummy
+ var iteration = iteration || 0;
+ %EnqueueMicrotask(function() {
+ if (asyncAssertsExpected === 0)
+ assertAsync(true, "all")
+ else if (iteration > 10) // Shouldn't take more.
+ assertAsync(false, "all")
+ else
+ assertAsyncDone(iteration + 1)
+ });
}
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-3750.js b/deps/v8/test/mjsunit/es6/regress/regress-3750.js
index a425def2b7..10509bff51 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-3750.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-3750.js
@@ -1,6 +1,8 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Flags: --harmony-object-observe
'use strict';
class Example { }
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-cr512574.js b/deps/v8/test/mjsunit/es6/regress/regress-cr512574.js
new file mode 100644
index 0000000000..2c10e19315
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-cr512574.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-destructuring
+
+function f({}) {
+ for (var v in []);
+};
diff --git a/deps/v8/test/mjsunit/es6/templates.js b/deps/v8/test/mjsunit/es6/templates.js
index feb7364613..621b06074e 100644
--- a/deps/v8/test/mjsunit/es6/templates.js
+++ b/deps/v8/test/mjsunit/es6/templates.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unicode
-
var num = 5;
var str = "str";
function fn() { return "result"; }
diff --git a/deps/v8/test/mjsunit/es6/typedarray-set-length.js b/deps/v8/test/mjsunit/es6/typedarray-set-length.js
new file mode 100644
index 0000000000..6dd5bf76e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/typedarray-set-length.js
@@ -0,0 +1,54 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var typedArrayConstructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array
+];
+
+var lengthCalled = false;
+function lengthValue() {
+ assertFalse(lengthCalled);
+ lengthCalled = true;
+ return 5;
+}
+
+// ToLength should convert these to usable lengths.
+var goodNonIntegerLengths = [
+ function() { return 4.6; },
+ function() { return -5; },
+ function() { return NaN; },
+ function() { return "5"; },
+ function() { return "abc"; },
+ function() { return true; },
+ function() { return null; },
+ function() { return undefined; }
+];
+
+// This will fail if you use ToLength on it.
+function badNonIntegerLength() {
+ return Symbol("5");
+}
+
+for (var constructor of typedArrayConstructors) {
+ lengthCalled = false;
+ var a = new constructor(10);
+ a.set({length: {valueOf: lengthValue}});
+ assertTrue(lengthCalled);
+
+ for (var lengthFun of goodNonIntegerLengths) {
+ a.set({length: {valueOf: lengthFun}});
+ }
+
+ assertThrows(function() {
+ a.set({length: {valueOf: badNonIntegerLength}});
+ }, TypeError);
+}
diff --git a/deps/v8/test/mjsunit/es6/typedarray.js b/deps/v8/test/mjsunit/es6/typedarray.js
index ef7955ce92..7b1cc06e1c 100644
--- a/deps/v8/test/mjsunit/es6/typedarray.js
+++ b/deps/v8/test/mjsunit/es6/typedarray.js
@@ -417,6 +417,7 @@ var typedArrayConstructors = [
function TestPropertyTypeChecks(constructor) {
function CheckProperty(name) {
+ assertThrows(function() { 'use strict'; new constructor(10)[name] = 0; })
var d = Object.getOwnPropertyDescriptor(constructor.prototype, name);
var o = {};
assertThrows(function() {d.get.call(o);}, TypeError);
@@ -756,3 +757,13 @@ TestArbitrary(new DataView(new ArrayBuffer(256)));
// Test direct constructor call
assertThrows(function() { ArrayBuffer(); }, TypeError);
assertThrows(function() { DataView(new ArrayBuffer()); }, TypeError);
+
+function TestNonConfigurableProperties(constructor) {
+ var arr = new constructor([100])
+ assertFalse(Object.getOwnPropertyDescriptor(arr,"0").configurable)
+ assertFalse(delete arr[0])
+}
+
+for(i = 0; i < typedArrayConstructors.length; i++) {
+ TestNonConfigurableProperties(typedArrayConstructors[i]);
+}
diff --git a/deps/v8/test/mjsunit/harmony/unicode-escapes.js b/deps/v8/test/mjsunit/es6/unicode-escapes.js
index b39ee1a5b0..be269366cf 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-escapes.js
+++ b/deps/v8/test/mjsunit/es6/unicode-escapes.js
@@ -4,8 +4,6 @@
// ES6 extends the \uxxxx escape and also allows \u{xxxxx}.
-// Flags: --harmony-unicode
-
// Unicode escapes in variable names.
(function TestVariableNames1() {
diff --git a/deps/v8/test/mjsunit/es7/object-observe-debug-event.js b/deps/v8/test/mjsunit/es7/object-observe-debug-event.js
index ed627642cc..06123b8dc2 100644
--- a/deps/v8/test/mjsunit/es7/object-observe-debug-event.js
+++ b/deps/v8/test/mjsunit/es7/object-observe-debug-event.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-object-observe
// Flags: --expose-debug-as debug
Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/es7/object-observe-runtime.js b/deps/v8/test/mjsunit/es7/object-observe-runtime.js
index 769cd1b296..1a07141af6 100644
--- a/deps/v8/test/mjsunit/es7/object-observe-runtime.js
+++ b/deps/v8/test/mjsunit/es7/object-observe-runtime.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-object-observe
// Flags: --allow-natives-syntax
// These tests are meant to ensure that that the Object.observe runtime
diff --git a/deps/v8/test/mjsunit/es7/object-observe.js b/deps/v8/test/mjsunit/es7/object-observe.js
index b2853c4048..5a252a3745 100644
--- a/deps/v8/test/mjsunit/es7/object-observe.js
+++ b/deps/v8/test/mjsunit/es7/object-observe.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-proxies
+// Flags: --harmony-proxies --harmony-object-observe
// Flags: --allow-natives-syntax
var allObservers = [];
diff --git a/deps/v8/test/mjsunit/function-bind.js b/deps/v8/test/mjsunit/function-bind.js
index 23dacf157e..ca1ed7e489 100644
--- a/deps/v8/test/mjsunit/function-bind.js
+++ b/deps/v8/test/mjsunit/function-bind.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
// Tests the Function.prototype.bind (ES 15.3.4.5) method.
// Simple tests.
@@ -298,3 +300,20 @@ assertThrows(function() { f.arguments = 42; }, TypeError);
// the caller is strict and the callee isn't. A bound function is built-in,
// but not considered strict.
(function foo() { return foo.caller; }).bind()();
+
+
+(function TestProtoIsPreserved() {
+ function fun() {}
+
+ function proto() {}
+ Object.setPrototypeOf(fun, proto);
+ var bound = fun.bind({});
+ assertEquals(proto, Object.getPrototypeOf(bound));
+
+ var bound2 = fun.bind({});
+ assertTrue(%HaveSameMap(new bound, new bound2));
+
+ Object.setPrototypeOf(fun, null);
+ bound = Function.prototype.bind.call(fun, {});
+ assertEquals(null, Object.getPrototypeOf(bound));
+})();
diff --git a/deps/v8/test/mjsunit/harmony/atomics.js b/deps/v8/test/mjsunit/harmony/atomics.js
index ff403b8bd1..bff9f95a81 100644
--- a/deps/v8/test/mjsunit/harmony/atomics.js
+++ b/deps/v8/test/mjsunit/harmony/atomics.js
@@ -123,6 +123,21 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
assertEquals(undefined, Atomics.xor(si32a, i, 0), name);
assertEquals(undefined, Atomics.exchange(si32a, i, 0), name);
});
+
+ // Monkey-patch length and make sure these functions still return undefined.
+ Object.defineProperty(si32a, 'length', {get: function() { return 1000; }});
+ [2, 100].forEach(function(i) {
+ var name = String(i);
+ assertEquals(undefined, Atomics.compareExchange(si32a, i, 0, 0), name);
+ assertEquals(undefined, Atomics.load(si32a, i), name);
+ assertEquals(undefined, Atomics.store(si32a, i, 0), name);
+ assertEquals(undefined, Atomics.add(si32a, i, 0), name);
+ assertEquals(undefined, Atomics.sub(si32a, i, 0), name);
+ assertEquals(undefined, Atomics.and(si32a, i, 0), name);
+ assertEquals(undefined, Atomics.or(si32a, i, 0), name);
+ assertEquals(undefined, Atomics.xor(si32a, i, 0), name);
+ assertEquals(undefined, Atomics.exchange(si32a, i, 0), name);
+ });
})();
(function TestGoodIndex() {
@@ -344,6 +359,58 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
}
})();
+(function TestToNumber() {
+ IntegerTypedArrayConstructors.forEach(function(t) {
+ var sab = new SharedArrayBuffer(1 * t.constr.BYTES_PER_ELEMENT);
+ var sta = new t.constr(sab);
+
+ var valueOf = {valueOf: function(){ return 3;}};
+ var toString = {toString: function(){ return '3';}};
+
+ [false, true, undefined, valueOf, toString].forEach(function(v) {
+ var name = Object.prototype.toString.call(sta) + ' - ' + v;
+
+ // CompareExchange
+ sta[0] = 50;
+ assertEquals(50, Atomics.compareExchange(sta, 0, v, v), name);
+
+ // Store
+ assertEquals(+v, Atomics.store(sta, 0, v), name);
+ assertEquals(v|0, sta[0], name);
+
+ // Add
+ sta[0] = 120;
+ assertEquals(120, Atomics.add(sta, 0, v), name);
+ assertEquals(120 + (v|0), sta[0], name);
+
+ // Sub
+ sta[0] = 70;
+ assertEquals(70, Atomics.sub(sta, 0, v), name);
+ assertEquals(70 - (v|0), sta[0]);
+
+ // And
+ sta[0] = 0x20;
+ assertEquals(0x20, Atomics.and(sta, 0, v), name);
+ assertEquals(0x20 & (v|0), sta[0]);
+
+ // Or
+ sta[0] = 0x3d;
+ assertEquals(0x3d, Atomics.or(sta, 0, v), name);
+ assertEquals(0x3d | (v|0), sta[0]);
+
+ // Xor
+ sta[0] = 0x25;
+ assertEquals(0x25, Atomics.xor(sta, 0, v), name);
+ assertEquals(0x25 ^ (v|0), sta[0]);
+
+ // Exchange
+ sta[0] = 0x09;
+ assertEquals(0x09, Atomics.exchange(sta, 0, v), name);
+ assertEquals(v|0, sta[0]);
+ });
+ });
+})();
+
(function TestWrapping() {
IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
diff --git a/deps/v8/test/mjsunit/harmony/block-conflicts-sloppy.js b/deps/v8/test/mjsunit/harmony/block-conflicts-sloppy.js
new file mode 100644
index 0000000000..ad947700ac
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/block-conflicts-sloppy.js
@@ -0,0 +1,179 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test for conflicting variable bindings.
+
+// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let --harmony-sloppy-function
+
+function CheckException(e) {
+ var string = e.toString();
+ assertTrue(string.indexOf("has already been declared") >= 0 ||
+ string.indexOf("redeclaration") >= 0);
+ return 'Conflict';
+}
+
+
+function TestGlobal(s,e) {
+ try {
+ return eval(s + e);
+ } catch (x) {
+ return CheckException(x);
+ }
+}
+
+
+function TestFunction(s,e) {
+ try {
+ return eval("(function(){" + s + " return " + e + "})")();
+ } catch (x) {
+ return CheckException(x);
+ }
+}
+
+
+function TestBlock(s,e) {
+ try {
+ return eval("(function(){ {" + s + "} return " + e + "})")();
+ } catch (x) {
+ return CheckException(x);
+ }
+}
+
+function TestAll(expected,s,opt_e) {
+ var e = "";
+ var msg = s;
+ if (opt_e) { e = opt_e; msg += opt_e; }
+ // TODO(littledan): Add tests using Realm.eval to ensure that global eval
+ // works as expected.
+ assertEquals(expected === 'LocalConflict' ? 'NoConflict' : expected,
+ TestGlobal(s,e), "global:'" + msg + "'");
+ assertEquals(expected === 'LocalConflict' ? 'NoConflict' : expected,
+ TestFunction(s,e), "function:'" + msg + "'");
+ assertEquals(expected === 'LocalConflict' ? 'Conflict' : expected,
+ TestBlock(s,e), "block:'" + msg + "'");
+}
+
+
+function TestConflict(s) {
+ TestAll('Conflict', s);
+ TestAll('Conflict', 'eval("' + s + '");');
+}
+
+function TestNoConflict(s) {
+ TestAll('NoConflict', s, "'NoConflict'");
+ TestAll('NoConflict', 'eval("' + s + '");', "'NoConflict'");
+}
+
+function TestLocalConflict(s) {
+ TestAll('LocalConflict', s, "'NoConflict'");
+ TestAll('NoConflict', 'eval("' + s + '");', "'NoConflict'");
+}
+
+var letbinds = [ "let x;",
+ "let x = 0;",
+ "let x = undefined;",
+ "let x = function() {};",
+ "let x, y;",
+ "let y, x;",
+ "const x = 0;",
+ "const x = undefined;",
+ "const x = function() {};",
+ "const x = 2, y = 3;",
+ "const y = 4, x = 5;",
+ "class x { }",
+ ];
+function forCompatible(bind) {
+ return !bind.startsWith('class');
+}
+var varbinds = [ "var x;",
+ "var x = 0;",
+ "var x = undefined;",
+ "var x = function() {};",
+ "var x, y;",
+ "var y, x;",
+ ];
+var funbind = "function x() {}";
+
+for (var l = 0; l < letbinds.length; ++l) {
+ // Test conflicting let/var bindings.
+ for (var v = 0; v < varbinds.length; ++v) {
+ // Same level.
+ TestConflict(letbinds[l] + varbinds[v]);
+ TestConflict(varbinds[v] + letbinds[l]);
+ // Different level.
+ TestConflict(letbinds[l] + '{' + varbinds[v] + '}');
+ TestConflict('{' + varbinds[v] +'}' + letbinds[l]);
+ TestNoConflict(varbinds[v] + '{' + letbinds[l] + '}');
+ TestNoConflict('{' + letbinds[l] + '}' + varbinds[v]);
+ // For loop.
+ if (forCompatible(letbinds[l])) {
+ TestConflict('for (' + letbinds[l] + '0;) {' + varbinds[v] + '}');
+ }
+ TestNoConflict('for (' + varbinds[v] + '0;) {' + letbinds[l] + '}');
+ }
+
+ // Test conflicting let/let bindings.
+ for (var k = 0; k < letbinds.length; ++k) {
+ // Same level.
+ TestConflict(letbinds[l] + letbinds[k]);
+ TestConflict(letbinds[k] + letbinds[l]);
+ // Different level.
+ TestNoConflict(letbinds[l] + '{ ' + letbinds[k] + '}');
+ TestNoConflict('{' + letbinds[k] +'} ' + letbinds[l]);
+ // For loop.
+ if (forCompatible(letbinds[l])) {
+ TestNoConflict('for (' + letbinds[l] + '0;) {' + letbinds[k] + '}');
+ }
+ if (forCompatible(letbinds[k])) {
+ TestNoConflict('for (' + letbinds[k] + '0;) {' + letbinds[l] + '}');
+ }
+ }
+
+ // Test conflicting function/let bindings.
+ // Same level.
+ TestConflict(letbinds[l] + funbind);
+ TestConflict(funbind + letbinds[l]);
+ // Different level.
+ TestNoConflict(letbinds[l] + '{' + funbind + '}');
+ TestNoConflict('{' + funbind + '}' + letbinds[l]);
+ TestNoConflict(funbind + '{' + letbinds[l] + '}');
+ TestNoConflict('{' + letbinds[l] + '}' + funbind);
+ // For loop.
+ if (forCompatible(letbinds[l])) {
+ TestNoConflict('for (' + letbinds[l] + '0;) {' + funbind + '}');
+ }
+
+ // Test conflicting parameter/let bindings.
+ TestConflict('(function(x) {' + letbinds[l] + '})();');
+}
+
+// Test conflicting function/var bindings.
+for (var v = 0; v < varbinds.length; ++v) {
+ // Same level.
+ TestLocalConflict(varbinds[v] + funbind);
+ TestLocalConflict(funbind + varbinds[v]);
+ // Different level.
+ TestLocalConflict(funbind + '{' + varbinds[v] + '}');
+ TestLocalConflict('{' + varbinds[v] +'}' + funbind);
+ TestNoConflict(varbinds[v] + '{' + funbind + '}');
+ TestNoConflict('{' + funbind + '}' + varbinds[v]);
+ // For loop.
+ TestNoConflict('for (' + varbinds[v] + '0;) {' + funbind + '}');
+}
+
+// Test conflicting catch/var bindings.
+for (var v = 0; v < varbinds.length; ++v) {
+ TestNoConflict('try {} catch(x) {' + varbinds[v] + '}');
+}
+
+// Test conflicting parameter/var bindings.
+for (var v = 0; v < varbinds.length; ++v) {
+ TestNoConflict('(function (x) {' + varbinds[v] + '})();');
+}
+
+// Test conflicting catch/function bindings.
+TestNoConflict('try {} catch(x) {' + funbind + '}');
+
+// Test conflicting parameter/function bindings.
+TestNoConflict('(function (x) {' + funbind + '})();');
diff --git a/deps/v8/test/mjsunit/harmony/block-const-assign-sloppy.js b/deps/v8/test/mjsunit/harmony/block-const-assign-sloppy.js
new file mode 100644
index 0000000000..506847c5b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/block-const-assign-sloppy.js
@@ -0,0 +1,158 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
+
+// Test that we throw early syntax errors in harmony mode
+// when using an immutable binding in an assigment or with
+// prefix/postfix decrement/increment operators.
+
+const decls = [
+ // Const declaration.
+ function(use) { return "const c = 1; " + use + ";" }, TypeError,
+ function(use) { return "const x = 0, c = 1; " + use + ";" }, TypeError,
+ function(use) { return "const c = 1, x = (" + use + ");" }, TypeError,
+ function(use) { return use + "; const c = 1;" }, ReferenceError,
+ function(use) { return use + "; const x = 0, c = 1;" }, ReferenceError,
+ function(use) { return "const x = (" + use + "), c = 1;" }, ReferenceError,
+ function(use) { return "const c = (" + use + ");" }, ReferenceError,
+
+ // Function expression.
+ function(use) { return "(function c() { " + use + "; })();"; }, TypeError,
+ // TODO(rossberg): Once we have default parameters, test using 'c' there.
+
+ // Class expression.
+ function(use) {
+ return "new class c { constructor() { " + use + " } };";
+ }, TypeError,
+ function(use) {
+ return "(new class c { m() { " + use + " } }).m();";
+ }, TypeError,
+ function(use) {
+ return "(new class c { get a() { " + use + " } }).a;";
+ }, TypeError,
+ function(use) {
+ return "(new class c { set a(x) { " + use + " } }).a = 0;";
+ }, TypeError,
+ function(use) {
+ return "(class c { static m() { " + use + " } }).s();";
+ }, TypeError,
+ function(use) {
+ return "(class c extends (" + use + ") {});";
+ }, ReferenceError,
+ function(use) {
+ return "(class c { [" + use + "]() {} });";
+ }, ReferenceError,
+ function(use) {
+ return "(class c { get [" + use + "]() {} });";
+ }, ReferenceError,
+ function(use) {
+ return "(class c { set [" + use + "](x) {} });";
+ }, ReferenceError,
+ function(use) {
+ return "(class c { static [" + use + "]() {} });";
+ }, ReferenceError,
+
+ // For loop.
+ function(use) {
+ return "for (const c = 0; " + use + ";) {}"
+ }, TypeError,
+ function(use) {
+ return "for (const x = 0, c = 0; " + use + ";) {}"
+ }, TypeError,
+ function(use) {
+ return "for (const c = 0; ; " + use + ") {}"
+ }, TypeError,
+ function(use) {
+ return "for (const x = 0, c = 0; ; " + use + ") {}"
+ }, TypeError,
+ function(use) {
+ return "for (const c = 0; ;) { " + use + "; }"
+ }, TypeError,
+ function(use) {
+ return "for (const x = 0, c = 0; ;) { " + use + "; }"
+ }, TypeError,
+ function(use) {
+ return "for (const c in {a: 1}) { " + use + "; }"
+ }, TypeError,
+ function(use) {
+ return "for (const c of [1]) { " + use + "; }"
+ }, TypeError,
+ function(use) {
+ return "for (const x = (" + use + "), c = 0; ;) {}"
+ }, ReferenceError,
+ function(use) {
+ return "for (const c = (" + use + "); ;) {}"
+ }, ReferenceError,
+]
+
+let uses = [
+ 'c = 1',
+ 'c += 1',
+ '++c',
+ 'c--',
+];
+
+let declcontexts = [
+ function(decl) { return decl; },
+ function(decl) { return "eval(\'" + decl + "\')"; },
+ function(decl) { return "{ " + decl + " }"; },
+ function(decl) { return "(function() { " + decl + " })()"; },
+];
+
+let usecontexts = [
+ function(use) { return use; },
+ function(use) { return "eval(\"" + use + "\")"; },
+ function(use) { return "(function() { " + use + " })()"; },
+ function(use) { return "(function() { eval(\"" + use + "\"); })()"; },
+ function(use) { return "eval(\"(function() { " + use + "; })\")()"; },
+];
+
+function Test(program, error) {
+ program = "'use strict'; " + program;
+ try {
+ print(program, " // throw " + error.name);
+ eval(program);
+ } catch (e) {
+ assertInstanceof(e, error);
+ if (e === TypeError) {
+ assertTrue(e.toString().indexOf("Assignment to constant variable") >= 0);
+ }
+ return;
+ }
+ assertUnreachable();
+}
+
+for (var d = 0; d < decls.length; d += 2) {
+ for (var u = 0; u < uses.length; ++u) {
+ for (var o = 0; o < declcontexts.length; ++o) {
+ for (var i = 0; i < usecontexts.length; ++i) {
+ Test(declcontexts[o](decls[d](usecontexts[i](uses[u]))), decls[d + 1]);
+ }
+ }
+ }
+}
diff --git a/deps/v8/test/mjsunit/harmony/block-for-sloppy.js b/deps/v8/test/mjsunit/harmony/block-for-sloppy.js
new file mode 100644
index 0000000000..eee8e0b5cd
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/block-for-sloppy.js
@@ -0,0 +1,199 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
+
+function props(x) {
+ var array = [];
+ for (let p in x) array.push(p);
+ return array.sort();
+}
+
+assertEquals(0, props({}).length);
+assertEquals(1, props({x:1}).length);
+assertEquals(2, props({x:1, y:2}).length);
+
+assertArrayEquals(["x"], props({x:1}));
+assertArrayEquals(["x", "y"], props({x:1, y:2}));
+assertArrayEquals(["x", "y", "zoom"], props({x:1, y:2, zoom:3}));
+
+assertEquals(0, props([]).length);
+assertEquals(1, props([1]).length);
+assertEquals(2, props([1,2]).length);
+
+assertArrayEquals(["0"], props([1]));
+assertArrayEquals(["0", "1"], props([1,2]));
+assertArrayEquals(["0", "1", "2"], props([1,2,3]));
+
+var o = {};
+var a = [];
+let i = "outer_i";
+let s = "outer_s";
+for (let i = 0x0020; i < 0x01ff; i+=2) {
+ let s = 'char:' + String.fromCharCode(i);
+ a.push(s);
+ o[s] = i;
+}
+assertArrayEquals(a, props(o));
+assertEquals(i, "outer_i");
+assertEquals(s, "outer_s");
+
+var a = [];
+assertEquals(0, props(a).length);
+a[Math.pow(2,30)-1] = 0;
+assertEquals(1, props(a).length);
+a[Math.pow(2,31)-1] = 0;
+assertEquals(2, props(a).length);
+a[1] = 0;
+assertEquals(3, props(a).length);
+
+var result = '';
+for (let p in {a : [0], b : 1}) { result += p; }
+assertEquals('ab', result);
+
+var result = '';
+for (let p in {a : {v:1}, b : 1}) { result += p; }
+assertEquals('ab', result);
+
+var result = '';
+for (let p in { get a() {}, b : 1}) { result += p; }
+assertEquals('ab', result);
+
+var result = '';
+for (let p in { get a() {}, set a(x) {}, b : 1}) { result += p; }
+assertEquals('ab', result);
+
+
+// Check that there is exactly one variable without initializer
+// in a for-in statement with let variables.
+assertThrows("function foo() { 'use strict'; for (let in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x = 3 in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x, y in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x = 3, y in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x, y = 4 in {}) { } }", SyntaxError);
+assertThrows("function foo() { 'use strict'; for (let x = 3, y = 4 in {}) { } }", SyntaxError);
+
+
+// In a normal for statement the iteration variable is
+// freshly allocated for each iteration.
+function closures1() {
+ let a = [];
+ for (let i = 0; i < 5; ++i) {
+ a.push(function () { return i; });
+ }
+ for (let j = 0; j < 5; ++j) {
+ assertEquals(j, a[j]());
+ }
+}
+closures1();
+
+
+function closures2() {
+ let a = [], b = [];
+ for (let i = 0, j = 10; i < 5; ++i, ++j) {
+ a.push(function () { return i; });
+ b.push(function () { return j; });
+ }
+ for (let k = 0; k < 5; ++k) {
+ assertEquals(k, a[k]());
+ assertEquals(k + 10, b[k]());
+ }
+}
+closures2();
+
+
+function closure_in_for_init() {
+ let a = [];
+ for (let i = 0, f = function() { return i }; i < 5; ++i) {
+ a.push(f);
+ }
+ for (let k = 0; k < 5; ++k) {
+ assertEquals(0, a[k]());
+ }
+}
+closure_in_for_init();
+
+
+function closure_in_for_cond() {
+ let a = [];
+ for (let i = 0; a.push(function () { return i; }), i < 5; ++i) { }
+ for (let k = 0; k < 5; ++k) {
+ assertEquals(k, a[k]());
+ }
+}
+closure_in_for_cond();
+
+
+function closure_in_for_next() {
+ let a = [];
+ for (let i = 0; i < 5; a.push(function () { return i; }), ++i) { }
+ for (let k = 0; k < 5; ++k) {
+ assertEquals(k + 1, a[k]());
+ }
+}
+closure_in_for_next();
+
+
+// In a for-in statement the iteration variable is fresh
+// for each iteration.
+function closures3(x) {
+ let a = [];
+ for (let p in x) {
+ a.push(function () { return p; });
+ }
+ let k = 0;
+ for (let q in x) {
+ assertEquals(q, a[k]());
+ ++k;
+ }
+}
+closures3({a : [0], b : 1, c : {v : 1}, get d() {}, set e(x) {}});
+
+// Check normal for statement completion values.
+assertEquals(1, eval("for (let i = 0; i < 10; i++) { 1; }"));
+assertEquals(9, eval("for (let i = 0; i < 10; i++) { i; }"));
+assertEquals(undefined, eval("for (let i = 0; false;) { }"));
+assertEquals(undefined, eval("for (const i = 0; false;) { }"));
+assertEquals(undefined, eval("for (let i = 0; i < 10; i++) { }"));
+assertEquals(undefined, eval("for (let i = 0; false;) { i; }"));
+assertEquals(undefined, eval("for (const i = 0; false;) { i; }"));
+assertEquals(undefined, eval("for (let i = 0; true;) { break; }"));
+assertEquals(undefined, eval("for (const i = 0; true;) { break; }"));
+assertEquals(undefined, eval("for (let i = 0; i < 10; i++) { continue; }"));
+assertEquals(undefined, eval("for (let i = 0; true;) { break; i; }"));
+assertEquals(undefined, eval("for (const i = 0; true;) { break; i; }"));
+assertEquals(undefined, eval("for (let i = 0; i < 10; i++) { continue; i; }"));
+assertEquals(0, eval("for (let i = 0; true;) { i; break; }"));
+assertEquals(0, eval("for (const i = 0; true;) { i; break; }"));
+assertEquals(9, eval("for (let i = 0; i < 10; i++) { i; continue; }"));
+assertEquals(3, eval("for (let i = 0; true; i++) { i; if (i >= 3) break; }"));
+assertEquals(2, eval("for (let i = 0; true; i++) { if (i >= 3) break; i; }"));
+assertEquals(
+ 2, eval("for (let i = 0; i < 10; i++) { if (i >= 3) continue; i; }"));
+assertEquals(undefined, eval("foo: for (let i = 0; true;) { break foo; }"));
+assertEquals(undefined, eval("foo: for (const i = 0; true;) { break foo; }"));
+assertEquals(3, eval("foo: for (let i = 3; true;) { i; break foo; }"));
diff --git a/deps/v8/test/mjsunit/harmony/block-leave-sloppy.js b/deps/v8/test/mjsunit/harmony/block-leave-sloppy.js
new file mode 100644
index 0000000000..fe21341c2e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/block-leave-sloppy.js
@@ -0,0 +1,224 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
+
+// We want to test the context chain shape. In each of the tests cases
+// below, the outer with is to force a runtime lookup of the identifier 'x'
+// to actually verify that the inner context has been discarded. A static
+// lookup of 'x' might accidentally succeed.
+
+{
+ let x = 2;
+ L: {
+ let x = 3;
+ assertEquals(3, x);
+ break L;
+ assertTrue(false);
+ }
+ assertEquals(2, x);
+}
+
+do {
+ let x = 4;
+ assertEquals(4,x);
+ {
+ let x = 5;
+ assertEquals(5, x);
+ continue;
+ assertTrue(false);
+ }
+} while (false);
+
+var caught = false;
+try {
+ {
+ let xx = 18;
+ throw 25;
+ assertTrue(false);
+ }
+} catch (e) {
+ caught = true;
+ assertEquals(25, e);
+ (function () {
+ try {
+ // NOTE: This checks that the block scope containing xx has been
+ // removed from the context chain.
+ eval('xx');
+ assertTrue(false); // should not reach here
+ } catch (e2) {
+ assertTrue(e2 instanceof ReferenceError);
+ }
+ })();
+}
+assertTrue(caught);
+
+
+(function(x) {
+ label: {
+ let x = 'inner';
+ break label;
+ }
+ assertEquals('outer', eval('x'));
+})('outer');
+
+
+(function(x) {
+ label: {
+ let x = 'middle';
+ {
+ let x = 'inner';
+ break label;
+ }
+ }
+ assertEquals('outer', eval('x'));
+})('outer');
+
+
+(function(x) {
+ for (var i = 0; i < 10; ++i) {
+ let x = 'inner' + i;
+ continue;
+ }
+ assertEquals('outer', eval('x'));
+})('outer');
+
+
+(function(x) {
+ label: for (var i = 0; i < 10; ++i) {
+ let x = 'middle' + i;
+ for (var j = 0; j < 10; ++j) {
+ let x = 'inner' + j;
+ continue label;
+ }
+ }
+ assertEquals('outer', eval('x'));
+})('outer');
+
+
+(function(x) {
+ try {
+ let x = 'inner';
+ throw 0;
+ } catch (e) {
+ assertEquals('outer', eval('x'));
+ }
+})('outer');
+
+
+(function(x) {
+ try {
+ let x = 'middle';
+ {
+ let x = 'inner';
+ throw 0;
+ }
+ } catch (e) {
+ assertEquals('outer', eval('x'));
+ }
+})('outer');
+
+
+try {
+ (function(x) {
+ try {
+ let x = 'inner';
+ throw 0;
+ } finally {
+ assertEquals('outer', eval('x'));
+ }
+ })('outer');
+} catch (e) {
+ if (e instanceof MjsUnitAssertionError) throw e;
+}
+
+
+try {
+ (function(x) {
+ try {
+ let x = 'middle';
+ {
+ let x = 'inner';
+ throw 0;
+ }
+ } finally {
+ assertEquals('outer', eval('x'));
+ }
+ })('outer');
+} catch (e) {
+ if (e instanceof MjsUnitAssertionError) throw e;
+}
+
+
+// Verify that the context is correctly set in the stack frame after exiting
+// from eval.
+function f() {}
+
+(function(x) {
+ label: {
+ let x = 'inner';
+ break label;
+ }
+ f(); // The context could be restored from the stack after the call.
+ assertEquals('outer', eval('x'));
+})('outer');
+
+
+(function(x) {
+ for (var i = 0; i < 10; ++i) {
+ let x = 'inner';
+ continue;
+ }
+ f();
+ assertEquals('outer', eval('x'));
+})('outer');
+
+
+(function(x) {
+ try {
+ let x = 'inner';
+ throw 0;
+ } catch (e) {
+ f();
+ assertEquals('outer', eval('x'));
+ }
+})('outer');
+
+
+try {
+ (function(x) {
+ try {
+ let x = 'inner';
+ throw 0;
+ } finally {
+ f();
+ assertEquals('outer', eval('x'));
+ }
+ })('outer');
+} catch (e) {
+ if (e instanceof MjsUnitAssertionError) throw e;
+}
diff --git a/deps/v8/test/mjsunit/harmony/block-let-crankshaft-sloppy.js b/deps/v8/test/mjsunit/harmony/block-let-crankshaft-sloppy.js
new file mode 100644
index 0000000000..dc5cdfb5b7
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/block-let-crankshaft-sloppy.js
@@ -0,0 +1,483 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
+
+// Check that the following functions are optimizable.
+var functions = [ f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14,
+ f15, f16, f17, f18, f19, f20, f21, f22, f23, f24, f25, f26,
+ f27, f28, f29, f30, f31, f32, f33];
+
+for (var i = 0; i < functions.length; ++i) {
+ var func = functions[i];
+ print("Testing:");
+ print(func);
+ for (var j = 0; j < 10; ++j) {
+ func(12);
+ }
+ %OptimizeFunctionOnNextCall(func);
+ func(12);
+ assertOptimized(func);
+}
+
+function f1() { }
+
+function f2(x) { }
+
+function f3() {
+ let x;
+}
+
+function f4() {
+ function foo() {
+ }
+}
+
+function f5() {
+ let x = 1;
+}
+
+function f6() {
+ const x = 1;
+}
+
+function f7(x) {
+ return x;
+}
+
+function f8() {
+ let x;
+ return x;
+}
+
+function f9() {
+ function x() {
+ }
+ return x;
+}
+
+function f10(x) {
+ x = 1;
+}
+
+function f11() {
+ let x;
+ x = 1;
+}
+
+function f12() {
+ function x() {};
+ x = 1;
+}
+
+function f13(x) {
+ (function() { x; });
+}
+
+function f14() {
+ let x;
+ (function() { x; });
+}
+
+function f15() {
+ function x() {
+ }
+ (function() { x; });
+}
+
+function f16() {
+ let x = 1;
+ (function() { x; });
+}
+
+function f17() {
+ const x = 1;
+ (function() { x; });
+}
+
+function f18(x) {
+ return x;
+ (function() { x; });
+}
+
+function f19() {
+ let x;
+ return x;
+ (function() { x; });
+}
+
+function f20() {
+ function x() {
+ }
+ return x;
+ (function() { x; });
+}
+
+function f21(x) {
+ x = 1;
+ (function() { x; });
+}
+
+function f22() {
+ let x;
+ x = 1;
+ (function() { x; });
+}
+
+function f23() {
+ function x() { }
+ x = 1;
+ (function() { x; });
+}
+
+function f24() {
+ let x = 1;
+ {
+ let x = 2;
+ {
+ let x = 3;
+ assertEquals(3, x);
+ }
+ assertEquals(2, x);
+ }
+ assertEquals(1, x);
+}
+
+function f25() {
+ {
+ let x = 2;
+ L: {
+ let x = 3;
+ assertEquals(3, x);
+ break L;
+ assertTrue(false);
+ }
+ assertEquals(2, x);
+ }
+ assertTrue(true);
+}
+
+function f26() {
+ {
+ let x = 1;
+ L: {
+ let x = 2;
+ {
+ let x = 3;
+ assertEquals(3, x);
+ break L;
+ assertTrue(false);
+ }
+ assertTrue(false);
+ }
+ assertEquals(1, x);
+ }
+}
+
+
+function f27() {
+ do {
+ let x = 4;
+ assertEquals(4,x);
+ {
+ let x = 5;
+ assertEquals(5, x);
+ continue;
+ assertTrue(false);
+ }
+ } while (false);
+}
+
+function f28() {
+ label: for (var i = 0; i < 10; ++i) {
+ let x = 'middle' + i;
+ for (var j = 0; j < 10; ++j) {
+ let x = 'inner' + j;
+ continue label;
+ }
+ }
+}
+
+function f29() {
+ // Verify that the context is correctly set in the stack frame after exiting
+ // from with.
+
+ let x = 'outer';
+ label: {
+ let x = 'inner';
+ break label;
+ }
+ f(); // The context could be restored from the stack after the call.
+ assertEquals('outer', x);
+
+ function f() {
+ assertEquals('outer', x);
+ };
+}
+
+function f30() {
+ let x = 'outer';
+ for (var i = 0; i < 10; ++i) {
+ let x = 'inner';
+ continue;
+ }
+ f();
+ assertEquals('outer', x);
+
+ function f() {
+ assertEquals('outer', x);
+ };
+}
+
+function f31() {
+ {
+ let x = 'outer';
+ label: for (var i = 0; assertEquals('outer', x), i < 10; ++i) {
+ let x = 'middle' + i;
+ {
+ let x = 'inner' + j;
+ continue label;
+ }
+ }
+ assertEquals('outer', x);
+ }
+}
+
+var c = true;
+
+function f32() {
+ {
+ let x = 'outer';
+ L: {
+ {
+ let x = 'inner';
+ if (c) {
+ break L;
+ }
+ }
+ foo();
+ }
+ }
+
+ function foo() {
+ return 'bar';
+ }
+}
+
+function f33() {
+ {
+ let x = 'outer';
+ L: {
+ {
+ let x = 'inner';
+ if (c) {
+ break L;
+ }
+ foo();
+ }
+ }
+ }
+
+ function foo() {
+ return 'bar';
+ }
+}
+
+function TestThrow() {
+ function f() {
+ let x = 'outer';
+ {
+ let x = 'inner';
+ throw x;
+ }
+ }
+ for (var i = 0; i < 5; i++) {
+ try {
+ f();
+ } catch (e) {
+ assertEquals('inner', e);
+ }
+ }
+ %OptimizeFunctionOnNextCall(f);
+ try {
+ f();
+ } catch (e) {
+ assertEquals('inner', e);
+ }
+ assertOptimized(f);
+}
+
+TestThrow();
+
+// Test that temporal dead zone semantics for function and block scoped
+// let bindings are handled by the optimizing compiler.
+
+function TestFunctionLocal(s) {
+ 'use strict';
+ var func = eval("(function baz(){" + s + "; })");
+ print("Testing:");
+ print(func);
+ for (var i = 0; i < 5; ++i) {
+ try {
+ func();
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, ReferenceError);
+ }
+ }
+ %OptimizeFunctionOnNextCall(func);
+ try {
+ func();
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, ReferenceError);
+ }
+}
+
+function TestFunctionContext(s) {
+ 'use strict';
+ var func = eval("(function baz(){ " + s + "; (function() { x; }); })");
+ print("Testing:");
+ print(func);
+ for (var i = 0; i < 5; ++i) {
+ print(i);
+ try {
+ func();
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, ReferenceError);
+ }
+ }
+ print("optimize");
+ %OptimizeFunctionOnNextCall(func);
+ try {
+ print("call");
+ func();
+ assertUnreachable();
+ } catch (e) {
+ print("catch");
+ assertInstanceof(e, ReferenceError);
+ }
+}
+
+function TestBlockLocal(s) {
+ 'use strict';
+ var func = eval("(function baz(){ { " + s + "; } })");
+ print("Testing:");
+ print(func);
+ for (var i = 0; i < 5; ++i) {
+ try {
+ func();
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, ReferenceError);
+ }
+ }
+ %OptimizeFunctionOnNextCall(func);
+ try {
+ func();
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, ReferenceError);
+ }
+}
+
+function TestBlockContext(s) {
+ 'use strict';
+ var func = eval("(function baz(){ { " + s + "; (function() { x; }); } })");
+ print("Testing:");
+ print(func);
+ for (var i = 0; i < 5; ++i) {
+ print(i);
+ try {
+ func();
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, ReferenceError);
+ }
+ }
+ print("optimize");
+ %OptimizeFunctionOnNextCall(func);
+ try {
+ print("call");
+ func();
+ assertUnreachable();
+ } catch (e) {
+ print("catch");
+ assertInstanceof(e, ReferenceError);
+ }
+}
+
+function TestAll(s) {
+ TestFunctionLocal(s);
+ TestFunctionContext(s);
+ TestBlockLocal(s);
+ TestBlockContext(s);
+}
+
+// Use before initialization in declaration statement.
+TestAll('let x = x + 1');
+TestAll('let x = x += 1');
+TestAll('let x = x++');
+TestAll('let x = ++x');
+TestAll('const x = x + 1');
+
+// Use before initialization in prior statement.
+TestAll('x + 1; let x;');
+TestAll('x = 1; let x;');
+TestAll('x += 1; let x;');
+TestAll('++x; let x;');
+TestAll('x++; let x;');
+TestAll('let y = x; const x = 1;');
+
+
+function f(x) {
+ let y = x + 42;
+ return y;
+}
+
+function g(x) {
+ {
+ let y = x + 42;
+ return y;
+ }
+}
+
+for (var i=0; i<10; i++) {
+ f(i);
+ g(i);
+}
+
+%OptimizeFunctionOnNextCall(f);
+%OptimizeFunctionOnNextCall(g);
+
+f(12);
+g(12);
+
+assertTrue(%GetOptimizationStatus(f) != 2);
+assertTrue(%GetOptimizationStatus(g) != 2);
diff --git a/deps/v8/test/mjsunit/harmony/block-let-declaration-sloppy.js b/deps/v8/test/mjsunit/harmony/block-let-declaration-sloppy.js
new file mode 100644
index 0000000000..b94576cabc
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/block-let-declaration-sloppy.js
@@ -0,0 +1,174 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test let declarations in various settings.
+
+// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
+
+// Global
+let x;
+let y = 2;
+const z = 4;
+class c { static foo() { return 1; } }
+
+// Block local
+{
+ let y;
+ let x = 3;
+ const z = 5;
+ class c { static foo() { return 2; } }
+}
+
+assertEquals(undefined, x);
+assertEquals(2,y);
+assertEquals(4,z);
+assertEquals(1, c.foo());
+
+if (true) {
+ let y;
+ assertEquals(undefined, y);
+}
+
+// Invalid declarations are early errors in harmony mode and thus should trigger
+// an exception in eval code during parsing, before even compiling or executing
+// the code. Thus the generated function is not called here.
+function TestLocalThrows(str, expect) {
+ assertThrows("(function(arg){ 'use strict'; " + str + "})", expect);
+}
+
+function TestLocalDoesNotThrow(str) {
+ assertDoesNotThrow("(function(arg){ 'use strict'; " + str + "})()");
+}
+
+// Test let declarations in statement positions.
+TestLocalThrows("if (true) let x;", SyntaxError);
+TestLocalThrows("if (true) {} else let x;", SyntaxError);
+TestLocalThrows("do let x; while (false)", SyntaxError);
+TestLocalThrows("while (false) let x;", SyntaxError);
+TestLocalThrows("label: let x;", SyntaxError);
+TestLocalThrows("for (;false;) let x;", SyntaxError);
+TestLocalDoesNotThrow("switch (true) { case true: let x; }");
+TestLocalDoesNotThrow("switch (true) { default: let x; }");
+
+// Test const declarations with initialisers in statement positions.
+TestLocalThrows("if (true) const x = 1;", SyntaxError);
+TestLocalThrows("if (true) {} else const x = 1;", SyntaxError);
+TestLocalThrows("do const x = 1; while (false)", SyntaxError);
+TestLocalThrows("while (false) const x = 1;", SyntaxError);
+TestLocalThrows("label: const x = 1;", SyntaxError);
+TestLocalThrows("for (;false;) const x = 1;", SyntaxError);
+TestLocalDoesNotThrow("switch (true) { case true: const x = 1; }");
+TestLocalDoesNotThrow("switch (true) { default: const x = 1; }");
+
+// Test const declarations without initialisers.
+TestLocalThrows("const x;", SyntaxError);
+TestLocalThrows("const x = 1, y;", SyntaxError);
+TestLocalThrows("const x, y = 1;", SyntaxError);
+
+// Test const declarations without initialisers in statement positions.
+TestLocalThrows("if (true) const x;", SyntaxError);
+TestLocalThrows("if (true) {} else const x;", SyntaxError);
+TestLocalThrows("do const x; while (false)", SyntaxError);
+TestLocalThrows("while (false) const x;", SyntaxError);
+TestLocalThrows("label: const x;", SyntaxError);
+TestLocalThrows("for (;false;) const x;", SyntaxError);
+TestLocalThrows("switch (true) { case true: const x; }", SyntaxError);
+TestLocalThrows("switch (true) { default: const x; }", SyntaxError);
+
+// Test var declarations in statement positions.
+TestLocalDoesNotThrow("if (true) var x;");
+TestLocalDoesNotThrow("if (true) {} else var x;");
+TestLocalDoesNotThrow("do var x; while (false)");
+TestLocalDoesNotThrow("while (false) var x;");
+TestLocalDoesNotThrow("label: var x;");
+TestLocalDoesNotThrow("for (;false;) var x;");
+TestLocalDoesNotThrow("switch (true) { case true: var x; }");
+TestLocalDoesNotThrow("switch (true) { default: var x; }");
+
+// Test class declarations with initialisers in statement positions.
+TestLocalThrows("if (true) class x { };", SyntaxError);
+TestLocalThrows("if (true) {} else class x { };", SyntaxError);
+TestLocalThrows("do class x { }; while (false)", SyntaxError);
+TestLocalThrows("while (false) class x { };", SyntaxError);
+TestLocalThrows("label: class x { };", SyntaxError);
+TestLocalThrows("for (;false;) class x { };", SyntaxError);
+TestLocalDoesNotThrow("switch (true) { case true: class x { }; }");
+TestLocalDoesNotThrow("switch (true) { default: class x { }; }");
+
+// Test that redeclarations of functions are only allowed in outermost scope.
+TestLocalThrows("{ let f; var f; }");
+TestLocalThrows("{ var f; let f; }");
+TestLocalThrows("{ function f() {} let f; }");
+TestLocalThrows("{ let f; function f() {} }");
+TestLocalThrows("{ function f() {} var f; }");
+TestLocalThrows("{ var f; function f() {} }");
+TestLocalThrows("{ function f() {} class f {} }");
+TestLocalThrows("{ class f {}; function f() {} }");
+TestLocalThrows("{ function f() {} function f() {} }");
+TestLocalThrows("function f() {} let f;");
+TestLocalThrows("let f; function f() {}");
+TestLocalThrows("function f() {} class f {}");
+TestLocalThrows("class f {}; function f() {}");
+TestLocalDoesNotThrow("function arg() {}");
+TestLocalDoesNotThrow("function f() {} var f;");
+TestLocalDoesNotThrow("var f; function f() {}");
+TestLocalDoesNotThrow("function f() {} function f() {}");
+
+function g(f) {
+ function f() { return 1 }
+ return f()
+}
+assertEquals(1, g(function() { return 2 }))
+
+
+// Test function declarations in source element and
+// sloppy statement positions.
+function f() {
+ // Sloppy source element positions.
+ function g0() {
+ "use strict";
+ // Strict source element positions.
+ function h() { }
+ {
+ function h1() { }
+ }
+ }
+ {
+ function g1() { }
+ }
+}
+f();
+
+// Test function declarations in statement position in strict mode.
+TestLocalThrows("function f() { if (true) function g() {} }", SyntaxError);
+TestLocalThrows("function f() { if (true) {} else function g() {} }", SyntaxError);
+TestLocalThrows("function f() { do function g() {} while (false) }", SyntaxError);
+TestLocalThrows("function f() { while (false) function g() {} }", SyntaxError);
+TestLocalThrows("function f() { label: function g() {} }", SyntaxError);
+TestLocalThrows("function f() { for (;false;) function g() {} }", SyntaxError);
+TestLocalDoesNotThrow("function f() { switch (true) { case true: function g() {} } }");
+TestLocalDoesNotThrow("function f() { switch (true) { default: function g() {} } }");
diff --git a/deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js b/deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js
new file mode 100644
index 0000000000..3d529fc36d
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js
@@ -0,0 +1,192 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-sloppy --no-legacy-const --harmony-sloppy-let --harmony-sloppy-function
+
+// Test temporal dead zone semantics of let bound variables in
+// function and block scopes.
+
+function TestFunctionLocal(s) {
+ try {
+ eval("(function(){" + s + "; })")();
+ } catch (e) {
+ assertInstanceof(e, ReferenceError);
+ return;
+ }
+ assertUnreachable();
+}
+
+function TestBlockLocal(s,e) {
+ try {
+ eval("(function(){ {" + s + ";} })")();
+ } catch (e) {
+ assertInstanceof(e, ReferenceError);
+ return;
+ }
+ assertUnreachable();
+}
+
+
+function TestAll(s) {
+ TestBlockLocal(s);
+ TestFunctionLocal(s);
+}
+
+// Use before initialization in declaration statement.
+TestAll('let x = x + 1');
+TestAll('let x = x += 1');
+TestAll('let x = x++');
+TestAll('let x = ++x');
+TestAll('const x = x + 1');
+
+// Use before initialization in prior statement.
+TestAll('x + 1; let x;');
+TestAll('x = 1; let x;');
+TestAll('x += 1; let x;');
+TestAll('++x; let x;');
+TestAll('x++; let x;');
+TestAll('let y = x; const x = 1;');
+TestAll('let y = x; class x {}');
+
+TestAll('f(); let x; function f() { return x + 1; }');
+TestAll('f(); let x; function f() { x = 1; }');
+TestAll('f(); let x; function f() { x += 1; }');
+TestAll('f(); let x; function f() { ++x; }');
+TestAll('f(); let x; function f() { x++; }');
+TestAll('f(); const x = 1; function f() { return x; }');
+TestAll('f(); class x { }; function f() { return x; }');
+
+TestAll('f()(); let x; function f() { return function() { return x + 1; } }');
+TestAll('f()(); let x; function f() { return function() { x = 1; } }');
+TestAll('f()(); let x; function f() { return function() { x += 1; } }');
+TestAll('f()(); let x; function f() { return function() { ++x; } }');
+TestAll('f()(); let x; function f() { return function() { x++; } }');
+TestAll('f()(); const x = 1; function f() { return function() { return x; } }');
+TestAll('f()(); class x { }; function f() { return function() { return x; } }');
+
+for (var kw of ['let x = 2', 'const x = 2', 'class x { }']) {
+ // Use before initialization with a dynamic lookup.
+ TestAll(`eval("x"); ${kw};`);
+ TestAll(`eval("x + 1;"); ${kw};`);
+ TestAll(`eval("x = 1;"); ${kw};`);
+ TestAll(`eval("x += 1;"); ${kw};`);
+ TestAll(`eval("++x;"); ${kw};`);
+ TestAll(`eval("x++;"); ${kw};`);
+
+ // Use before initialization with check for eval-shadowed bindings.
+ TestAll(`function f() { eval("var y = 2;"); x + 1; }; f(); ${kw};`);
+ TestAll(`function f() { eval("var y = 2;"); x = 1; }; f(); ${kw};`);
+ TestAll(`function f() { eval("var y = 2;"); x += 1; }; f(); ${kw};`);
+ TestAll(`function f() { eval("var y = 2;"); ++x; }; f(); ${kw};`);
+ TestAll(`function f() { eval("var y = 2;"); x++; }; f(); ${kw};`);
+}
+
+// Test that variables introduced by function declarations are created and
+// initialized upon entering a function / block scope.
+function f() {
+ {
+ assertEquals(2, g1());
+ assertEquals(2, eval("g1()"));
+
+ // block scoped function declaration
+ function g1() {
+ return 2;
+ }
+ }
+
+ assertEquals(3, g2());
+ assertEquals(3, eval("g2()"));
+ // function scoped function declaration
+ function g2() {
+ return 3;
+ }
+}
+f();
+
+// Test that a function declaration introduces a block scoped variable.
+TestAll('{ function k() { return 0; } }; k(); ');
+
+// Test that a function declaration sees the scope it resides in.
+function f2() {
+ let m, n, o, p;
+ {
+ m = g;
+ function g() {
+ return a;
+ }
+ let a = 1;
+ }
+ assertEquals(1, m());
+
+ try {
+ throw 2;
+ } catch(b) {
+ n = h;
+ function h() {
+ return b + c;
+ }
+ let c = 3;
+ }
+ assertEquals(5, n());
+
+ {
+ o = i;
+ function i() {
+ return d;
+ }
+ let d = 4;
+ }
+ assertEquals(4, o());
+
+ try {
+ throw 5;
+ } catch(e) {
+ p = j;
+ function j() {
+ return e + f;
+ }
+ let f = 6;
+ }
+ assertEquals(11, p());
+}
+f2();
+
+// Test that resolution of let bound variables works with scopes that call eval.
+function outer() {
+ function middle() {
+ function inner() {
+ return x;
+ }
+ eval("1 + 1");
+ return x + inner();
+ }
+
+ let x = 1;
+ return middle();
+}
+
+assertEquals(2, outer());
diff --git a/deps/v8/test/mjsunit/harmony/block-scope-class.js b/deps/v8/test/mjsunit/harmony/block-scope-class.js
new file mode 100644
index 0000000000..351feaa90e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/block-scope-class.js
@@ -0,0 +1,59 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test for conflicting variable bindings.
+
+// Flags: --harmony-sloppy --harmony-sloppy-function
+
+function AssertEqualsStrictAndSloppy(value, code) {
+ assertEquals(value, eval("(function() {" + code + "})()"));
+ assertEquals(value, eval("(function() { 'use strict'; " + code + "})()"));
+ assertEquals(value, eval("(function() { var x = 0; {" + code + "} })()"));
+ assertEquals(value, eval("(function() { 'use strict'; var x = 0; {"
+ + code + "} })()"));
+}
+
+function AssertThrowsStrictAndSloppy(code, error) {
+ assertThrows("(function() {" + code + "})()", error);
+ assertThrows("(function() { 'use strict'; " + code + "})()", error);
+ assertThrows("(function() { var x = 0; { " + code + "} })()", error);
+ assertThrows("(function() { 'use strict'; var x = 0; {" + code + "} })()",
+ error);
+}
+
+(function TestClassTDZ() {
+ AssertEqualsStrictAndSloppy(
+ "x", "function f() { return x; }; class x { }; return f().name;");
+ AssertEqualsStrictAndSloppy
+ ("x", "class x { }; function f() { return x; }; return f().name;");
+ AssertEqualsStrictAndSloppy(
+ "x", "class x { }; var result = f().name; " +
+ "function f() { return x; }; return result;");
+ AssertThrowsStrictAndSloppy(
+ "function f() { return x; }; f(); class x { };", ReferenceError);
+ AssertThrowsStrictAndSloppy(
+ "f(); function f() { return x; }; class x { };", ReferenceError);
+ AssertThrowsStrictAndSloppy(
+ "f(); class x { }; function f() { return x; };", ReferenceError);
+ AssertThrowsStrictAndSloppy(
+ "var x = 1; { f(); class x { }; function f() { return x; }; }",
+ ReferenceError);
+ AssertThrowsStrictAndSloppy("x = 3; class x { };", ReferenceError)
+})();
+
+(function TestClassNameConflict() {
+ AssertThrowsStrictAndSloppy("class x { }; var x;", SyntaxError);
+ AssertThrowsStrictAndSloppy("var x; class x { };", SyntaxError);
+ AssertThrowsStrictAndSloppy("class x { }; function x() { };", SyntaxError);
+ AssertThrowsStrictAndSloppy("function x() { }; class x { };", SyntaxError);
+ AssertThrowsStrictAndSloppy("class x { }; for (var x = 0; false;) { };",
+ SyntaxError);
+ AssertThrowsStrictAndSloppy("for (var x = 0; false;) { }; class x { };",
+ SyntaxError);
+})();
+
+(function TestClassMutableBinding() {
+ AssertEqualsStrictAndSloppy(
+ "x3", "class x { }; var y = x.name; x = 3; return y + x;")
+})();
diff --git a/deps/v8/test/mjsunit/harmony/block-scoping-sloppy.js b/deps/v8/test/mjsunit/harmony/block-scoping-sloppy.js
new file mode 100644
index 0000000000..c91a9fbf1c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/block-scoping-sloppy.js
@@ -0,0 +1,310 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --harmony-sloppy --no-legacy-const --harmony-sloppy-let --harmony-sloppy-function
+// Test functionality of block scopes.
+
+// Hoisting of var declarations.
+function f1() {
+ {
+ var x = 1;
+ var y;
+ }
+ assertEquals(1, x)
+ assertEquals(undefined, y)
+}
+for (var j = 0; j < 5; ++j) f1();
+%OptimizeFunctionOnNextCall(f1);
+f1();
+assertTrue(%GetOptimizationStatus(f1) != 2);
+
+// Dynamic lookup in and through block contexts.
+function f2(one) {
+ var x = one + 1;
+ let y = one + 2;
+ const u = one + 4;
+ class a { static foo() { return one + 6; } }
+ {
+ let z = one + 3;
+ const v = one + 5;
+ class b { static foo() { return one + 7; } }
+ assertEquals(1, eval('one'));
+ assertEquals(2, eval('x'));
+ assertEquals(3, eval('y'));
+ assertEquals(4, eval('z'));
+ assertEquals(5, eval('u'));
+ assertEquals(6, eval('v'));
+ assertEquals(7, eval('a.foo()'));
+ assertEquals(8, eval('b.foo()'));
+ }
+}
+
+f2(1);
+
+// Lookup in and through block contexts.
+function f3(one) {
+ var x = one + 1;
+ let y = one + 2;
+ const u = one + 4;
+ class a { static foo() { return one + 6; } }
+ {
+ let z = one + 3;
+ const v = one + 5;
+ class b { static foo() { return one + 7; } }
+ assertEquals(1, one);
+ assertEquals(2, x);
+ assertEquals(3, y);
+ assertEquals(4, z);
+ assertEquals(5, u);
+ assertEquals(6, v);
+ assertEquals(7, a.foo());
+ assertEquals(8, b.foo());
+ }
+}
+for (var j = 0; j < 5; ++j) f3(1);
+%OptimizeFunctionOnNextCall(f3);
+f3(1);
+assertTrue(%GetOptimizationStatus(f3) != 2);
+
+
+
+// Dynamic lookup from closure.
+function f4(one) {
+ var x = one + 1;
+ let y = one + 2;
+ const u = one + 4;
+ class a { static foo() { return one + 6; } }
+ {
+ let z = one + 3;
+ const v = one + 5;
+ class b { static foo() { return one + 7; } }
+ function f() {
+ assertEquals(1, eval('one'));
+ assertEquals(2, eval('x'));
+ assertEquals(3, eval('y'));
+ assertEquals(4, eval('z'));
+ assertEquals(5, eval('u'));
+ assertEquals(6, eval('v'));
+ assertEquals(7, eval('a.foo()'));
+ assertEquals(8, eval('b.foo()'));
+ }
+ f();
+ }
+}
+f4(1);
+
+
+// Lookup from closure.
+function f5(one) {
+ var x = one + 1;
+ let y = one + 2;
+ const u = one + 4;
+ class a { static foo() { return one + 6; } }
+ {
+ let z = one + 3;
+ const v = one + 5;
+ class b { static foo() { return one + 7; } }
+ function f() {
+ assertEquals(1, one);
+ assertEquals(2, x);
+ assertEquals(3, y);
+ assertEquals(4, z);
+ assertEquals(5, u);
+ assertEquals(6, v);
+ assertEquals(7, a.foo());
+ assertEquals(8, b.foo());
+ }
+ f();
+ }
+}
+f5(1);
+
+
+// Return from block.
+function f6() {
+ let x = 1;
+ const u = 3;
+ {
+ let y = 2;
+ const v = 4;
+ return x + y;
+ }
+}
+assertEquals(3, f6(6));
+
+
+// Variable shadowing and lookup.
+function f7(a) {
+ let b = 1;
+ var c = 1;
+ var d = 1;
+ const e = 1;
+ class f { static foo() { return 1; } }
+ { // let variables shadowing argument, let, const, class and var variables
+ let a = 2;
+ let b = 2;
+ let c = 2;
+ let e = 2;
+ let f = 2;
+ assertEquals(2,a);
+ assertEquals(2,b);
+ assertEquals(2,c);
+ assertEquals(2,e);
+ assertEquals(2,f);
+ }
+ { // const variables shadowing argument, let, const and var variables
+ const a = 2;
+ const b = 2;
+ const c = 2;
+ const e = 2;
+ const f = 2;
+ assertEquals(2,a);
+ assertEquals(2,b);
+ assertEquals(2,c);
+ assertEquals(2,e);
+ assertEquals(2,f);
+ }
+ { // class variables shadowing argument, let, const and var variables
+ class a { static foo() { return 2; } }
+ class b { static foo() { return 2; } }
+ class c { static foo() { return 2; } }
+ class d { static foo() { return 2; } }
+ class e { static foo() { return 2; } }
+ class f { static foo() { return 2; } }
+ assertEquals(2,a.foo());
+ assertEquals(2,b.foo());
+ assertEquals(2,c.foo());
+ assertEquals(2,e.foo());
+ assertEquals(2,f.foo());
+ }
+ try {
+ throw 'stuff1';
+ } catch (a) {
+ assertEquals('stuff1',a);
+ // catch variable shadowing argument
+ a = 2;
+ assertEquals(2,a);
+ {
+ // let variable shadowing catch variable
+ let a = 3;
+ assertEquals(3,a);
+ try {
+ throw 'stuff2';
+ } catch (a) {
+ assertEquals('stuff2',a);
+ // catch variable shadowing let variable
+ a = 4;
+ assertEquals(4,a);
+ }
+ assertEquals(3,a);
+ }
+ assertEquals(2,a);
+ }
+ try {
+ throw 'stuff3';
+ } catch (c) {
+ // catch variable shadowing var variable
+ assertEquals('stuff3',c);
+ {
+ // const variable shadowing catch variable
+ const c = 3;
+ assertEquals(3,c);
+ }
+ assertEquals('stuff3',c);
+ try {
+ throw 'stuff4';
+ } catch(c) {
+ assertEquals('stuff4',c);
+ // catch variable shadowing catch variable
+ c = 3;
+ assertEquals(3,c);
+ }
+ (function(c) {
+ // argument shadowing catch variable
+ c = 3;
+ assertEquals(3,c);
+ })();
+ assertEquals('stuff3', c);
+ (function() {
+ // var variable shadowing catch variable
+ var c = 3;
+ })();
+ assertEquals('stuff3', c);
+ c = 2;
+ }
+ assertEquals(1,c);
+ (function(a,b,c,e,f) {
+ // arguments shadowing argument, let, const, class and var variable
+ a = 2;
+ b = 2;
+ c = 2;
+ e = 2;
+ f = 2;
+ assertEquals(2,a);
+ assertEquals(2,b);
+ assertEquals(2,c);
+ assertEquals(2,e);
+ assertEquals(2,f);
+ // var variable shadowing var variable
+ var d = 2;
+ })(1,1);
+ assertEquals(1,a);
+ assertEquals(1,b);
+ assertEquals(1,c);
+ assertEquals(1,d);
+ assertEquals(1,e);
+ assertEquals(1,f.foo());
+}
+f7(1);
+
+
+// Ensure let and const variables are block local
+// and var variables function local.
+function f8() {
+ var let_accessors = [];
+ var var_accessors = [];
+ var const_accessors = [];
+ var class_accessors = [];
+ for (var i = 0; i < 10; i++) {
+ let x = i;
+ var y = i;
+ const z = i;
+ class a { static foo() { return x; } }
+ let_accessors[i] = function() { return x; }
+ var_accessors[i] = function() { return y; }
+ const_accessors[i] = function() { return z; }
+ class_accessors[i] = function() { return a; }
+ }
+ for (var j = 0; j < 10; j++) {
+ y = j + 10;
+ assertEquals(j, let_accessors[j]());
+ assertEquals(y, var_accessors[j]());
+ assertEquals(j, const_accessors[j]());
+ assertEquals(j, class_accessors[j]().foo());
+ }
+}
+f8();
diff --git a/deps/v8/test/mjsunit/harmony/block-scoping-top-level-sloppy.js b/deps/v8/test/mjsunit/harmony/block-scoping-top-level-sloppy.js
new file mode 100644
index 0000000000..74492c4ca6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/block-scoping-top-level-sloppy.js
@@ -0,0 +1,34 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --min-preparse-length=0
+// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
+
+let xxx = 1;
+let f = undefined;
+{
+ let inner_x = xxx;
+ f = function() { return inner_x; };
+}
+
+assertSame(1, f());
+
+xxx = 42;
+{
+ f = function() { return inner_x1; };
+ let inner_x1 = xxx;
+}
+
+assertSame(42, f());
+
+xxx = 31;
+{
+ let inner_x1 = xxx;
+ try {
+ throw new Error();
+ } catch (e) {
+ f = function() { return inner_x1; };
+ }
+}
+assertSame(31, f());
diff --git a/deps/v8/test/mjsunit/harmony/default-parameters-debug.js b/deps/v8/test/mjsunit/harmony/default-parameters-debug.js
new file mode 100644
index 0000000000..ce9e626621
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/default-parameters-debug.js
@@ -0,0 +1,58 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony-default-parameters
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+listenerComplete = false;
+breakPointCount = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ breakPointCount++;
+ if (breakPointCount == 1) {
+ // Break point in initializer for parameter `a`, invoked by
+ // initializer for parameter `b`
+ assertEquals('default', exec_state.frame(1).evaluate('mode').value());
+
+ // initializer for `b` can't refer to `b`
+ assertThrows(function() {
+ exec_state.frame(1).evaluate('b').value();
+ }, ReferenceError);
+
+ assertThrows(function() {
+ exec_state.frame(1).evaluate('c');
+ }, ReferenceError);
+ } else if (breakPointCount == 2) {
+ // Break point in IIFE initializer for parameter `c`
+ assertEquals('modeFn', exec_state.frame(1).evaluate('a.name').value());
+ assertEquals('default', exec_state.frame(1).evaluate('b').value());
+ assertThrows(function() {
+ exec_state.frame(1).evaluate('c');
+ }, ReferenceError);
+ } else if (breakPointCount == 3) {
+ // Break point in function body --- `c` parameter is shadowed
+ assertEquals('modeFn', exec_state.frame(0).evaluate('a.name').value());
+ assertEquals('default', exec_state.frame(0).evaluate('b').value());
+ assertEquals('local', exec_state.frame(0).evaluate('d').value());
+ }
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function f(a = function modeFn(mode) { debugger; return mode; },
+ b = a("default"),
+ c = (function() { debugger; })()) {
+ var d = 'local';
+ debugger;
+};
+
+f();
+
+// Make sure that the debug event listener was invoked.
+assertEquals(3, breakPointCount);
diff --git a/deps/v8/test/mjsunit/harmony/default-parameters.js b/deps/v8/test/mjsunit/harmony/default-parameters.js
new file mode 100644
index 0000000000..43a7acd1c6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/default-parameters.js
@@ -0,0 +1,251 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-default-parameters --harmony-arrow-functions
+// Flags: --harmony-rest-parameters
+
+
+(function TestDefaults() {
+ function f1(x = 1) { return x }
+ assertEquals(1, f1());
+ assertEquals(1, f1(undefined));
+ assertEquals(2, f1(2));
+ assertEquals(null, f1(null));
+
+ function f2(x, y = x) { return x + y; }
+ assertEquals(8, f2(4));
+ assertEquals(8, f2(4, undefined));
+ assertEquals(6, f2(4, 2));
+
+ function f3(x = 1, y) { return x + y; }
+ assertEquals(8, f3(5, 3));
+ assertEquals(3, f3(undefined, 2));
+ assertEquals(6, f3(4, 2));
+
+ function f4(x = () => 1) { return x() }
+ assertEquals(1, f4());
+ assertEquals(1, f4(undefined));
+ assertEquals(2, f4(() => 2));
+ assertThrows(() => f4(null), TypeError);
+
+ function f5(x, y = () => x) { return x + y(); }
+ assertEquals(8, f5(4));
+ assertEquals(8, f5(4, undefined));
+ assertEquals(6, f5(4, () => 2));
+
+ function f6(x = {a: 1, m() { return 2 }}) { return x.a + x.m(); }
+ assertEquals(3, f6());
+ assertEquals(3, f6(undefined));
+ assertEquals(5, f6({a: 2, m() { return 3 }}));
+
+ var g1 = (x = 1) => { return x };
+ assertEquals(1, g1());
+ assertEquals(1, g1(undefined));
+ assertEquals(2, g1(2));
+ assertEquals(null, g1(null));
+
+ var g2 = (x, y = x) => { return x + y; };
+ assertEquals(8, g2(4));
+ assertEquals(8, g2(4, undefined));
+ assertEquals(6, g2(4, 2));
+
+ var g3 = (x = 1, y) => { return x + y; };
+ assertEquals(8, g3(5, 3));
+ assertEquals(3, g3(undefined, 2));
+ assertEquals(6, g3(4, 2));
+
+ var g4 = (x = () => 1) => { return x() };
+ assertEquals(1, g4());
+ assertEquals(1, g4(undefined));
+ assertEquals(2, g4(() => 2));
+ assertThrows(() => g4(null), TypeError);
+
+ var g5 = (x, y = () => x) => { return x + y(); };
+ assertEquals(8, g5(4));
+ assertEquals(8, g5(4, undefined));
+ assertEquals(6, g5(4, () => 2));
+
+ var g6 = (x = {a: 1, m() { return 2 }}) => { return x.a + x.m(); };
+ assertEquals(3, g6());
+ assertEquals(3, g6(undefined));
+ assertEquals(5, g6({a: 2, m() { return 3 }}));
+}());
+
+
+(function TestEvalInParameters() {
+ function f1(x = eval(0)) { return x }
+ assertEquals(0, f1());
+ function f2(x = () => eval(1)) { return x() }
+ assertEquals(1, f2());
+})();
+
+
+(function TestParameterScoping() {
+ // TODO(rossberg): Add checks for variable declarations in defaults.
+ var x = 1;
+
+ function f1(a = x) { var x = 2; return a; }
+ assertEquals(1, f1());
+ function f2(a = x) { function x() {}; return a; }
+ assertEquals(1, f2());
+ function f3(a = x) { 'use strict'; let x = 2; return a; }
+ assertEquals(1, f3());
+ function f4(a = x) { 'use strict'; const x = 2; return a; }
+ assertEquals(1, f4());
+ function f5(a = x) { 'use strict'; function x() {}; return a; }
+ assertEquals(1, f5());
+ function f6(a = eval("x")) { var x; return a; }
+ assertEquals(1, f6());
+ function f61(a = eval("x")) { 'use strict'; var x; return a; }
+ assertEquals(1, f61());
+ function f62(a = eval("'use strict'; x")) { var x; return a; }
+ assertEquals(1, f62());
+ function f7(a = function() { return x }) { var x; return a(); }
+ assertEquals(1, f7());
+ function f8(a = () => x) { var x; return a(); }
+ assertEquals(1, f8());
+ function f9(a = () => eval("x")) { var x; return a(); }
+ assertEquals(1, f9());
+ function f91(a = () => eval("x")) { 'use strict'; var x; return a(); }
+ assertEquals(1, f91());
+ function f92(a = () => { 'use strict'; return eval("x") }) { var x; return a(); }
+ assertEquals(1, f92());
+ function f93(a = () => eval("'use strict'; x")) { var x; return a(); }
+ assertEquals(1, f93());
+
+ var g1 = (a = x) => { var x = 2; return a; };
+ assertEquals(1, g1());
+ var g2 = (a = x) => { function x() {}; return a; };
+ assertEquals(1, g2());
+ var g3 = (a = x) => { 'use strict'; let x = 2; return a; };
+ assertEquals(1, g3());
+ var g4 = (a = x) => { 'use strict'; const x = 2; return a; };
+ assertEquals(1, g4());
+ var g5 = (a = x) => { 'use strict'; function x() {}; return a; };
+ assertEquals(1, g5());
+ var g6 = (a = eval("x")) => { var x; return a; };
+ assertEquals(1, g6());
+ var g61 = (a = eval("x")) => { 'use strict'; var x; return a; };
+ assertEquals(1, g61());
+ var g62 = (a = eval("'use strict'; x")) => { var x; return a; };
+ assertEquals(1, g62());
+ var g7 = (a = function() { return x }) => { var x; return a(); };
+ assertEquals(1, g7());
+ var g8 = (a = () => x) => { var x; return a(); };
+ assertEquals(1, g8());
+ var g9 = (a = () => eval("x")) => { var x; return a(); };
+ assertEquals(1, g9());
+ var g91 = (a = () => eval("x")) => { 'use strict'; var x; return a(); };
+ assertEquals(1, g91());
+ var g92 = (a = () => { 'use strict'; return eval("x") }) => { var x; return a(); };
+ assertEquals(1, g92());
+ var g93 = (a = () => eval("'use strict'; x")) => { var x; return a(); };
+ assertEquals(1, g93());
+
+ var f11 = function f(x = f) { var f; return x; }
+ assertSame(f11, f11());
+ var f12 = function f(x = f) { function f() {}; return x; }
+ assertSame(f12, f12());
+ var f13 = function f(x = f) { 'use strict'; let f; return x; }
+ assertSame(f13, f13());
+ var f14 = function f(x = f) { 'use strict'; const f = 0; return x; }
+ assertSame(f14, f14());
+ var f15 = function f(x = f) { 'use strict'; function f() {}; return x; }
+ assertSame(f15, f15());
+ var f16 = function f(f = 7, x = f) { return x; }
+ assertSame(7, f16());
+
+ var o1 = {f: function(x = this) { return x; }};
+ assertSame(o1, o1.f());
+ assertSame(1, o1.f(1));
+})();
+
+
+(function TestParameterTDZ() {
+ function f1(a = x, x) { return a }
+ assertThrows(() => f1(undefined, 4), ReferenceError);
+ assertEquals(4, f1(4, 5));
+ function f2(a = eval("x"), x) { return a }
+ assertThrows(() => f2(undefined, 4), ReferenceError);
+ assertEquals(4, f2(4, 5));
+ function f3(a = eval("x"), x) { 'use strict'; return a }
+ assertThrows(() => f3(undefined, 4), ReferenceError);
+ assertEquals(4, f3(4, 5));
+ function f4(a = eval("'use strict'; x"), x) { return a }
+ assertThrows(() => f4(undefined, 4), ReferenceError);
+ assertEquals(4, f4(4, 5));
+
+ function f5(a = () => x, x) { return a() }
+ assertEquals(4, f5(() => 4, 5));
+ function f6(a = () => eval("x"), x) { return a() }
+ assertEquals(4, f6(() => 4, 5));
+ function f7(a = () => eval("x"), x) { 'use strict'; return a() }
+ assertEquals(4, f7(() => 4, 5));
+ function f8(a = () => eval("'use strict'; x"), x) { return a() }
+ assertEquals(4, f8(() => 4, 5));
+
+ function f11(a = x, x = 2) { return a }
+ assertThrows(() => f11(), ReferenceError);
+ assertThrows(() => f11(undefined), ReferenceError);
+ assertThrows(() => f11(undefined, 4), ReferenceError);
+ assertEquals(4, f1(4, 5));
+ function f12(a = eval("x"), x = 2) { return a }
+ assertThrows(() => f12(), ReferenceError);
+ assertThrows(() => f12(undefined), ReferenceError);
+ assertThrows(() => f12(undefined, 4), ReferenceError);
+ assertEquals(4, f12(4, 5));
+ function f13(a = eval("x"), x = 2) { 'use strict'; return a }
+ assertThrows(() => f13(), ReferenceError);
+ assertThrows(() => f13(undefined), ReferenceError);
+ assertThrows(() => f13(undefined, 4), ReferenceError);
+ assertEquals(4, f13(4, 5));
+ function f14(a = eval("'use strict'; x"), x = 2) { return a }
+ assertThrows(() => f14(), ReferenceError);
+ assertThrows(() => f14(undefined), ReferenceError);
+ assertThrows(() => f14(undefined, 4), ReferenceError);
+ assertEquals(4, f14(4, 5));
+
+ function f34(x = function() { return a }, ...a) { return x()[0] }
+ assertEquals(4, f34(undefined, 4));
+ function f35(x = () => a, ...a) { return x()[0] }
+ assertEquals(4, f35(undefined, 4));
+ function f36(x = () => eval("a"), ...a) { return x()[0] }
+ assertEquals(4, f36(undefined, 4));
+ function f37(x = () => eval("a"), ...a) { 'use strict'; return x()[0] }
+ assertEquals(4, f37(undefined, 4));
+ function f38(x = () => { 'use strict'; return eval("a") }, ...a) { return x()[0] }
+ assertEquals(4, f38(undefined, 4));
+ function f39(x = () => eval("'use strict'; a"), ...a) { return x()[0] }
+ assertEquals(4, f39(undefined, 4));
+
+ var g34 = (x = function() { return a }, ...a) => { return x()[0] };
+ assertEquals(4, g34(undefined, 4));
+ var g35 = (x = () => a, ...a) => { return x()[0] };
+ assertEquals(4, g35(undefined, 4));
+})();
+
+
+(function TestArgumentsForNonSimpleParameters() {
+ function f1(x = 900) { arguments[0] = 1; return x }
+ assertEquals(9, f1(9));
+ assertEquals(900, f1());
+ function f2(x = 1001) { x = 2; return arguments[0] }
+ assertEquals(10, f2(10));
+ assertEquals(undefined, f2());
+}());
+
+
+(function TestFunctionLength() {
+ // TODO(rossberg): Fix arity.
+ // assertEquals(0, (function(x = 1) {}).length);
+ // assertEquals(0, (function(x = 1, ...a) {}).length);
+ // assertEquals(1, (function(x, y = 1) {}).length);
+ // assertEquals(1, (function(x, y = 1, ...a) {}).length);
+ // assertEquals(2, (function(x, y, z = 1) {}).length);
+ // assertEquals(2, (function(x, y, z = 1, ...a) {}).length);
+ // assertEquals(1, (function(x, y = 1, z) {}).length);
+ // assertEquals(1, (function(x, y = 1, z, ...a) {}).length);
+ // assertEquals(1, (function(x, y = 1, z, v = 2) {}).length);
+ // assertEquals(1, (function(x, y = 1, z, v = 2, ...a) {}).length);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount-nolazy.js b/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount-nolazy.js
index fdf1233f90..52d7ca06d9 100644
--- a/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount-nolazy.js
+++ b/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount-nolazy.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring --harmony-computed-property-names
+// Flags: --harmony-destructuring
// Flags: --harmony-arrow-functions --no-lazy --allow-natives-syntax
diff --git a/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount.js b/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount.js
index 85a45ea822..64c1793673 100644
--- a/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount.js
+++ b/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring --harmony-computed-property-names
+// Flags: --harmony-destructuring
// Flags: --harmony-arrow-functions --allow-natives-syntax
diff --git a/deps/v8/test/mjsunit/harmony/destructuring.js b/deps/v8/test/mjsunit/harmony/destructuring.js
index 198d4c0257..69e144b26f 100644
--- a/deps/v8/test/mjsunit/harmony/destructuring.js
+++ b/deps/v8/test/mjsunit/harmony/destructuring.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring --harmony-computed-property-names
-// Flags: --harmony-arrow-functions
+// Flags: --harmony-destructuring --harmony-arrow-functions
+// Flags: --harmony-default-parameters --harmony-rest-parameters
(function TestObjectLiteralPattern() {
var { x : x, y : y } = { x : 1, y : 2 };
@@ -227,6 +227,12 @@
}
{
+ let {x, y = () => eval("x+1")} = {x:42};
+ assertEquals(42, x);
+ assertEquals(43, y());
+ }
+
+ {
let {x = function() {return y+1;}, y} = {y:42};
assertEquals(43, x());
assertEquals(42, y);
@@ -716,6 +722,191 @@
}());
+(function TestExpressionsInParameters() {
+ function f0(x = eval(0)) { return x }
+ assertEquals(0, f0());
+ function f1({a = eval(1)}) { return a }
+ assertEquals(1, f1({}));
+ function f2([x = eval(2)]) { return x }
+ assertEquals(2, f2([]));
+ function f3({[eval(7)]: x}) { return x }
+ assertEquals(3, f3({7: 3}));
+})();
+
+
+(function TestParameterScoping() {
+ var x = 1;
+
+ function f1({a = x}) { var x = 2; return a; }
+ assertEquals(1, f1({}));
+ function f2({a = x}) { function x() {}; return a; }
+ assertEquals(1, f2({}));
+ function f3({a = x}) { 'use strict'; let x = 2; return a; }
+ assertEquals(1, f3({}));
+ function f4({a = x}) { 'use strict'; const x = 2; return a; }
+ assertEquals(1, f4({}));
+ function f5({a = x}) { 'use strict'; function x() {}; return a; }
+ assertEquals(1, f5({}));
+ function f6({a = eval("x")}) { var x; return a; }
+ assertEquals(1, f6({}));
+ function f61({a = eval("x")}) { 'use strict'; var x; return a; }
+ assertEquals(1, f61({}));
+ function f62({a = eval("'use strict'; x")}) { var x; return a; }
+ assertEquals(1, f62({}));
+ function f7({a = function() { return x }}) { var x; return a(); }
+ assertEquals(1, f7({}));
+ function f8({a = () => x}) { var x; return a(); }
+ assertEquals(1, f8({}));
+ function f9({a = () => eval("x")}) { var x; return a(); }
+ assertEquals(1, f9({}));
+ function f91({a = () => eval("x")}) { 'use strict'; var x; return a(); }
+ assertEquals(1, f91({}));
+ function f92({a = () => { 'use strict'; return eval("x") }}) { var x; return a(); }
+ assertEquals(1, f92({}));
+ function f93({a = () => eval("'use strict'; x")}) { var x; return a(); }
+ assertEquals(1, f93({}));
+
+ var g1 = ({a = x}) => { var x = 2; return a; };
+ assertEquals(1, g1({}));
+ var g2 = ({a = x}) => { function x() {}; return a; };
+ assertEquals(1, g2({}));
+ var g3 = ({a = x}) => { 'use strict'; let x = 2; return a; };
+ assertEquals(1, g3({}));
+ var g4 = ({a = x}) => { 'use strict'; const x = 2; return a; };
+ assertEquals(1, g4({}));
+ var g5 = ({a = x}) => { 'use strict'; function x() {}; return a; };
+ assertEquals(1, g5({}));
+ var g6 = ({a = eval("x")}) => { var x; return a; };
+ assertEquals(1, g6({}));
+ var g61 = ({a = eval("x")}) => { 'use strict'; var x; return a; };
+ assertEquals(1, g61({}));
+ var g62 = ({a = eval("'use strict'; x")}) => { var x; return a; };
+ assertEquals(1, g62({}));
+ var g7 = ({a = function() { return x }}) => { var x; return a(); };
+ assertEquals(1, g7({}));
+ var g8 = ({a = () => x}) => { var x; return a(); };
+ assertEquals(1, g8({}));
+ var g9 = ({a = () => eval("x")}) => { var x; return a(); };
+ assertEquals(1, g9({}));
+ var g91 = ({a = () => eval("x")}) => { 'use strict'; var x; return a(); };
+ assertEquals(1, g91({}));
+ var g92 = ({a = () => { 'use strict'; return eval("x") }}) => { var x; return a(); };
+ assertEquals(1, g92({}));
+ var g93 = ({a = () => eval("'use strict'; x")}) => { var x; return a(); };
+ assertEquals(1, g93({}));
+
+ var f11 = function f({x = f}) { var f; return x; }
+ assertSame(f11, f11({}));
+ var f12 = function f({x = f}) { function f() {}; return x; }
+ assertSame(f12, f12({}));
+ var f13 = function f({x = f}) { 'use strict'; let f; return x; }
+ assertSame(f13, f13({}));
+ var f14 = function f({x = f}) { 'use strict'; const f = 0; return x; }
+ assertSame(f14, f14({}));
+ var f15 = function f({x = f}) { 'use strict'; function f() {}; return x; }
+ assertSame(f15, f15({}));
+ var f16 = function f({f = 7, x = f}) { return x; }
+ assertSame(7, f16({}));
+
+ var y = 'a';
+ function f20({[y]: x}) { var y = 'b'; return x; }
+ assertEquals(1, f20({a: 1, b: 2}));
+ function f21({[eval('y')]: x}) { var y = 'b'; return x; }
+ assertEquals(1, f21({a: 1, b: 2}));
+ var g20 = ({[y]: x}) => { var y = 'b'; return x; };
+ assertEquals(1, g20({a: 1, b: 2}));
+ var g21 = ({[eval('y')]: x}) => { var y = 'b'; return x; };
+ assertEquals(1, g21({a: 1, b: 2}));
+})();
+
+
+(function TestParameterDestructuringTDZ() {
+ function f1({a = x}, x) { return a }
+ assertThrows(() => f1({}, 4), ReferenceError);
+ assertEquals(4, f1({a: 4}, 5));
+ function f2({a = eval("x")}, x) { return a }
+ assertThrows(() => f2({}, 4), ReferenceError);
+ assertEquals(4, f2({a: 4}, 5));
+ function f3({a = eval("x")}, x) { 'use strict'; return a }
+ assertThrows(() => f3({}, 4), ReferenceError);
+ assertEquals(4, f3({a: 4}, 5));
+ function f4({a = eval("'use strict'; x")}, x) { return a }
+ assertThrows(() => f4({}, 4), ReferenceError);
+ assertEquals(4, f4({a: 4}, 5));
+
+ function f5({a = () => x}, x) { return a() }
+ assertEquals(4, f5({a: () => 4}, 5));
+ function f6({a = () => eval("x")}, x) { return a() }
+ assertEquals(4, f6({a: () => 4}, 5));
+ function f7({a = () => eval("x")}, x) { 'use strict'; return a() }
+ assertEquals(4, f7({a: () => 4}, 5));
+ function f8({a = () => eval("'use strict'; x")}, x) { return a() }
+ assertEquals(4, f8({a: () => 4}, 5));
+
+ function f11({a = b}, {b}) { return a }
+ assertThrows(() => f11({}, {b: 4}), ReferenceError);
+ assertEquals(4, f11({a: 4}, {b: 5}));
+ function f12({a = eval("b")}, {b}) { return a }
+ assertThrows(() => f12({}, {b: 4}), ReferenceError);
+ assertEquals(4, f12({a: 4}, {b: 5}));
+ function f13({a = eval("b")}, {b}) { 'use strict'; return a }
+ assertThrows(() => f13({}, {b: 4}), ReferenceError);
+ assertEquals(4, f13({a: 4}, {b: 5}));
+ function f14({a = eval("'use strict'; b")}, {b}) { return a }
+ assertThrows(() => f14({}, {b: 4}), ReferenceError);
+ assertEquals(4, f14({a: 4}, {b: 5}));
+
+ function f15({a = () => b}, {b}) { return a() }
+ assertEquals(4, f15({a: () => 4}, {b: 5}));
+ function f16({a = () => eval("b")}, {b}) { return a() }
+ assertEquals(4, f16({a: () => 4}, {b: 5}));
+ function f17({a = () => eval("b")}, {b}) { 'use strict'; return a() }
+ assertEquals(4, f17({a: () => 4}, {b: 5}));
+ function f18({a = () => eval("'use strict'; b")}, {b}) { return a() }
+ assertEquals(4, f18({a: () => 4}, {b: 5}));
+
+ // TODO(caitp): TDZ for rest parameters is not working yet.
+ // function f30({x = a}, ...a) { return x[0] }
+ // assertThrows(() => f30({}), ReferenceError);
+ // assertEquals(4, f30({a: [4]}, 5));
+ // function f31({x = eval("a")}, ...a) { return x[0] }
+ // assertThrows(() => f31({}), ReferenceError);
+ // assertEquals(4, f31({a: [4]}, 5));
+ // function f32({x = eval("a")}, ...a) { 'use strict'; return x[0] }
+ // assertThrows(() => f32({}), ReferenceError);
+ // assertEquals(4, f32({a: [4]}, 5));
+ // function f33({x = eval("'use strict'; a")}, ...a) { return x[0] }
+ // assertThrows(() => f33({}), ReferenceError);
+ // assertEquals(4, f33({a: [4]}, 5));
+
+ function f34({x = function() { return a }}, ...a) { return x()[0] }
+ assertEquals(4, f34({}, 4));
+ function f35({x = () => a}, ...a) { return x()[0] }
+ assertEquals(4, f35({}, 4));
+ function f36({x = () => eval("a")}, ...a) { return x()[0] }
+ assertEquals(4, f36({}, 4));
+ function f37({x = () => eval("a")}, ...a) { 'use strict'; return x()[0] }
+ assertEquals(4, f37({}, 4));
+ function f38({x = () => { 'use strict'; return eval("a") }}, ...a) { return x()[0] }
+ assertEquals(4, f38({}, 4));
+ function f39({x = () => eval("'use strict'; a")}, ...a) { return x()[0] }
+ assertEquals(4, f39({}, 4));
+
+ // var g30 = ({x = a}, ...a) => {};
+ // assertThrows(() => g30({}), ReferenceError);
+ // var g31 = ({x = eval("a")}, ...a) => {};
+ // assertThrows(() => g31({}), ReferenceError);
+ // var g32 = ({x = eval("a")}, ...a) => { 'use strict'; };
+ // assertThrows(() => g32({}), ReferenceError);
+ // var g33 = ({x = eval("'use strict'; a")}, ...a) => {};
+ // assertThrows(() => g33({}), ReferenceError);
+ var g34 = ({x = function() { return a }}, ...a) => { return x()[0] };
+ assertEquals(4, g34({}, 4));
+ var g35 = ({x = () => a}, ...a) => { return x()[0] };
+ assertEquals(4, g35({}, 4));
+})();
+
+
(function TestDuplicatesInParameters() {
assertThrows("'use strict';function f(x,x){}", SyntaxError);
assertThrows("'use strict';function f({x,x}){}", SyntaxError);
@@ -724,12 +915,38 @@
assertThrows("'use strict';var f = ({x,x}) => {};", SyntaxError);
assertThrows("'use strict';var f = (x, {x}) => {};", SyntaxError);
- function ok(x) { var x; }; ok();
+ function ok1(x) { var x; return x; };
+ assertEquals(1, ok1(1));
+ function ok2(x) { 'use strict'; { let x = 2; return x; } };
+ assertEquals(2, ok2(1));
+
assertThrows("function f({x}) { var x; }; f({});", SyntaxError);
+ assertThrows("function f({x}) { { var x; } }; f({});", SyntaxError);
+ assertThrows("'use strict'; function f(x) { let x = 0; }; f({});", SyntaxError);
assertThrows("'use strict'; function f({x}) { let x = 0; }; f({});", SyntaxError);
}());
+(function TestArgumentsForNonSimpleParameters() {
+ function f1({}, x) { arguments[1] = 0; return x }
+ assertEquals(6, f1({}, 6));
+ function f2({}, x) { x = 2; return arguments[1] }
+ assertEquals(7, f2({}, 7));
+ function f3(x, {}) { arguments[0] = 0; return x }
+ assertEquals(6, f3(6, {}));
+ function f4(x, {}) { x = 2; return arguments[0] }
+ assertEquals(7, f4(7, {}));
+ function f5(x, ...a) { arguments[0] = 0; return x }
+ assertEquals(6, f5(6, {}));
+ function f6(x, ...a) { x = 2; return arguments[0] }
+ assertEquals(6, f6(6, {}));
+ function f7({a: x}) { x = 2; return arguments[0].a }
+ assertEquals(5, f7({a: 5}));
+ function f8(x, ...a) { a = []; return arguments[1] }
+ assertEquals(6, f8(5, 6));
+}());
+
+
(function TestForInOfTDZ() {
assertThrows("'use strict'; let x = {}; for (let [x, y] of {x});", ReferenceError);
assertThrows("'use strict'; let x = {}; for (let [y, x] of {x});", ReferenceError);
diff --git a/deps/v8/test/mjsunit/harmony/futex.js b/deps/v8/test/mjsunit/harmony/futex.js
new file mode 100644
index 0000000000..c7e1f5ce2a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/futex.js
@@ -0,0 +1,274 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-atomics --harmony-sharedarraybuffer
+
+(function TestFailsWithNonSharedArray() {
+ var ab = new ArrayBuffer(16);
+
+ var i8a = new Int8Array(ab);
+ var i16a = new Int16Array(ab);
+ var i32a = new Int32Array(ab);
+ var ui8a = new Uint8Array(ab);
+ var ui8ca = new Uint8ClampedArray(ab);
+ var ui16a = new Uint16Array(ab);
+ var ui32a = new Uint32Array(ab);
+ var f32a = new Float32Array(ab);
+ var f64a = new Float64Array(ab);
+
+ [i8a, i16a, i32a, ui8a, ui8ca, ui16a, ui32a, f32a, f64a].forEach(function(
+ ta) {
+ assertThrows(function() { Atomics.futexWait(ta, 0, 0); });
+ assertThrows(function() { Atomics.futexWake(ta, 0, 1); });
+ assertThrows(function() { Atomics.futexWakeOrRequeue(ta, 0, 1, 0, 0); });
+ });
+})();
+
+(function TestFailsWithNonSharedInt32Array() {
+ var sab = new SharedArrayBuffer(16);
+
+ var i8a = new Int8Array(sab);
+ var i16a = new Int16Array(sab);
+ var ui8a = new Uint8Array(sab);
+ var ui8ca = new Uint8ClampedArray(sab);
+ var ui16a = new Uint16Array(sab);
+ var ui32a = new Uint32Array(sab);
+ var f32a = new Float32Array(sab);
+ var f64a = new Float64Array(sab);
+
+ [i8a, i16a, ui8a, ui8ca, ui16a, ui32a, f32a, f64a].forEach(function(
+ ta) {
+ assertThrows(function() { Atomics.futexWait(ta, 0, 0); });
+ assertThrows(function() { Atomics.futexWake(ta, 0, 1); });
+ assertThrows(function() { Atomics.futexWakeOrRequeue(ta, 0, 1, 0, 0); });
+ });
+})();
+
+(function TestInvalidIndex() {
+ var i32a = new Int32Array(new SharedArrayBuffer(16));
+
+ // Valid indexes are 0-3.
+ [-1, 4, 100].forEach(function(invalidIndex) {
+ assertEquals(undefined, Atomics.futexWait(i32a, invalidIndex, 0));
+ assertEquals(undefined, Atomics.futexWake(i32a, invalidIndex, 0));
+ var validIndex = 0;
+ assertEquals(undefined, Atomics.futexWakeOrRequeue(i32a, invalidIndex, 0, 0,
+ validIndex));
+ assertEquals(undefined, Atomics.futexWakeOrRequeue(i32a, validIndex, 0, 0,
+ invalidIndex));
+ });
+
+})();
+
+(function TestWaitTimeout() {
+ var i32a = new Int32Array(new SharedArrayBuffer(16));
+ var waitMs = 100;
+ var startTime = new Date();
+ assertEquals(Atomics.TIMEDOUT, Atomics.futexWait(i32a, 0, 0, waitMs));
+ var endTime = new Date();
+ assertTrue(endTime - startTime >= waitMs);
+})();
+
+(function TestWaitNotEqual() {
+ var i32a = new Int32Array(new SharedArrayBuffer(16));
+ assertEquals(Atomics.NOTEQUAL, Atomics.futexWait(i32a, 0, 42));
+})();
+
+(function TestWaitNegativeTimeout() {
+ var i32a = new Int32Array(new SharedArrayBuffer(16));
+ assertEquals(Atomics.TIMEDOUT, Atomics.futexWait(i32a, 0, 0, -1));
+ assertEquals(Atomics.TIMEDOUT, Atomics.futexWait(i32a, 0, 0, -Infinity));
+})();
+
+//// WORKER ONLY TESTS
+
+if (this.Worker) {
+
+ var TestWaitWithTimeout = function(timeout) {
+ var sab = new SharedArrayBuffer(16);
+ var i32a = new Int32Array(sab);
+
+ var workerScript =
+ `onmessage = function(sab) {
+ var i32a = new Int32Array(sab);
+ var result = Atomics.futexWait(i32a, 0, 0, ${timeout});
+ postMessage(result);
+ };`;
+
+ var worker = new Worker(workerScript);
+ worker.postMessage(sab, [sab]);
+
+ // Spin until the worker is waiting on the futex.
+ while (%AtomicsFutexNumWaitersForTesting(i32a, 0) != 1) {}
+
+ Atomics.futexWake(i32a, 0, 1);
+ assertEquals(Atomics.OK, worker.getMessage());
+ worker.terminate();
+ };
+
+ // Test various infinite timeouts
+ TestWaitWithTimeout(undefined);
+ TestWaitWithTimeout(NaN);
+ TestWaitWithTimeout(Infinity);
+
+
+ (function TestWakeMulti() {
+ var sab = new SharedArrayBuffer(20);
+ var i32a = new Int32Array(sab);
+
+ // SAB values:
+ // i32a[id], where id in range [0, 3]:
+ // 0 => Worker |id| is still waiting on the futex
+ // 1 => Worker |id| is not waiting on futex, but has not be reaped by the
+ // main thread.
+ // 2 => Worker |id| has been reaped.
+ //
+ // i32a[4]:
+ // always 0. Each worker is waiting on this index.
+
+ var workerScript =
+ `onmessage = function(msg) {
+ var id = msg.id;
+ var i32a = new Int32Array(msg.sab);
+
+ // Wait on i32a[4] (should be zero).
+ var result = Atomics.futexWait(i32a, 4, 0);
+ // Set i32a[id] to 1 to notify the main thread which workers were
+ // woken up.
+ Atomics.store(i32a, id, 1);
+ postMessage(result);
+ };`;
+
+ var id;
+ var workers = [];
+ for (id = 0; id < 4; id++) {
+ workers[id] = new Worker(workerScript);
+ workers[id].postMessage({sab: sab, id: id}, [sab]);
+ }
+
+ // Spin until all workers are waiting on the futex.
+ while (%AtomicsFutexNumWaitersForTesting(i32a, 4) != 4) {}
+
+ // Wake up three waiters.
+ assertEquals(3, Atomics.futexWake(i32a, 4, 3));
+
+ var wokenCount = 0;
+ var waitingId = 0 + 1 + 2 + 3;
+ while (wokenCount < 3) {
+ for (id = 0; id < 4; id++) {
+ // Look for workers that have not yet been reaped. Set i32a[id] to 2
+ // when they've been processed so we don't look at them again.
+ if (Atomics.compareExchange(i32a, id, 1, 2) == 1) {
+ assertEquals(Atomics.OK, workers[id].getMessage());
+ workers[id].terminate();
+ waitingId -= id;
+ wokenCount++;
+ }
+ }
+ }
+
+ assertEquals(3, wokenCount);
+ assertEquals(0, Atomics.load(i32a, waitingId));
+ assertEquals(1, %AtomicsFutexNumWaitersForTesting(i32a, 4));
+
+ // Finally wake the last waiter.
+ assertEquals(1, Atomics.futexWake(i32a, 4, 1));
+ assertEquals(Atomics.OK, workers[waitingId].getMessage());
+ workers[waitingId].terminate();
+
+ assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a, 4));
+
+ })();
+
+ (function TestWakeOrRequeue() {
+ var sab = new SharedArrayBuffer(24);
+ var i32a = new Int32Array(sab);
+
+ // SAB values:
+ // i32a[id], where id in range [0, 3]:
+ // 0 => Worker |id| is still waiting on the futex
+ // 1 => Worker |id| is not waiting on futex, but has not be reaped by the
+ // main thread.
+ // 2 => Worker |id| has been reaped.
+ //
+ // i32a[4]:
+ // always 0. Each worker will initially wait on this index.
+ //
+ // i32a[5]:
+ // always 0. Requeued workers will wait on this index.
+
+ var workerScript =
+ `onmessage = function(msg) {
+ var id = msg.id;
+ var i32a = new Int32Array(msg.sab);
+
+ var result = Atomics.futexWait(i32a, 4, 0, Infinity);
+ Atomics.store(i32a, id, 1);
+ postMessage(result);
+ };`;
+
+ var workers = [];
+ for (id = 0; id < 4; id++) {
+ workers[id] = new Worker(workerScript);
+ workers[id].postMessage({sab: sab, id: id}, [sab]);
+ }
+
+ // Spin until all workers are waiting on the futex.
+ while (%AtomicsFutexNumWaitersForTesting(i32a, 4) != 4) {}
+
+ var index1 = 4;
+ var index2 = 5;
+
+ // If futexWakeOrRequeue is called with the incorrect value, it shouldn't
+ // wake any waiters.
+ assertEquals(Atomics.NOTEQUAL,
+ Atomics.futexWakeOrRequeue(i32a, index1, 1, 42, index2));
+
+ assertEquals(4, %AtomicsFutexNumWaitersForTesting(i32a, index1));
+ assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a, index2));
+
+ // Now wake with the correct value.
+ assertEquals(1, Atomics.futexWakeOrRequeue(i32a, index1, 1, 0, index2));
+
+ // The workers that are still waiting should atomically be transferred to
+ // the new index.
+ assertEquals(3, %AtomicsFutexNumWaitersForTesting(i32a, index2));
+
+ // The woken worker may not have been scheduled yet. Look for which thread
+ // has set its i32a value to 1.
+ var wokenCount = 0;
+ while (wokenCount < 1) {
+ for (id = 0; id < 4; id++) {
+ if (Atomics.compareExchange(i32a, id, 1, 2) == 1) {
+ wokenCount++;
+ }
+ }
+ }
+
+ assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a, index1));
+
+ // Wake the remaining waiters.
+ assertEquals(3, Atomics.futexWake(i32a, index2, 3));
+
+ // As above, wait until the workers have been scheduled.
+ wokenCount = 0;
+ while (wokenCount < 3) {
+ for (id = 0; id < 4; id++) {
+ if (Atomics.compareExchange(i32a, id, 1, 2) == 1) {
+ wokenCount++;
+ }
+ }
+ }
+
+ assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a, index1));
+ assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a, index2));
+
+ for (id = 0; id < 4; ++id) {
+ assertEquals(Atomics.OK, workers[id].getMessage());
+ workers[id].terminate();
+ }
+
+ })();
+
+}
diff --git a/deps/v8/test/mjsunit/harmony/new-target.js b/deps/v8/test/mjsunit/harmony/new-target.js
index 587461a958..d98f5f8098 100644
--- a/deps/v8/test/mjsunit/harmony/new-target.js
+++ b/deps/v8/test/mjsunit/harmony/new-target.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-classes --harmony-new-target --harmony-reflect
+// Flags: --harmony-new-target --harmony-reflect --harmony-destructuring
// Flags: --harmony-rest-parameters --harmony-arrow-functions
@@ -368,3 +368,31 @@
a2 = 3;
f(1, 2, 3);
})();
+
+
+(function TestOtherScopes() {
+ function f1() { return eval("'use strict'; new.target") }
+ assertSame(f1, new f1);
+ function f2() { with ({}) return new.target }
+ assertSame(f2, new f2);
+ function f3({a}) { return new.target }
+ assertSame(f3, new f3({}));
+ function f4(...a) { return new.target }
+ assertSame(f4, new f4);
+ function f5() { 'use strict'; { let x; return new.target } }
+ assertSame(f5, new f5);
+ function f6() { with ({'new.target': 42}) return new.target }
+ assertSame(f6, new f6);
+})();
+
+
+(function TestEarlyErrors() {
+ assertThrows(function() { Function("new.target = 42"); }, ReferenceError);
+ assertThrows(function() { Function("var foo = 1; new.target = foo = 42"); }, ReferenceError);
+ assertThrows(function() { Function("var foo = 1; foo = new.target = 42"); }, ReferenceError);
+ assertThrows(function() { Function("new.target--"); }, ReferenceError);
+ assertThrows(function() { Function("--new.target"); }, ReferenceError);
+ assertThrows(function() { Function("(new.target)++"); }, ReferenceError);
+ assertThrows(function() { Function("++(new.target)"); }, ReferenceError);
+ assertThrows(function() { Function("for (new.target of {});"); }, SyntaxError);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies.js b/deps/v8/test/mjsunit/harmony/proxies.js
index 585574eb43..f1d37b445a 100644
--- a/deps/v8/test/mjsunit/harmony/proxies.js
+++ b/deps/v8/test/mjsunit/harmony/proxies.js
@@ -382,6 +382,10 @@ function TestSet2(create, handler) {
assertEquals(46, (function(n) { return p[n] = 46 })(99))
assertEquals("99", key)
assertEquals(46, val)
+
+ assertEquals(47, p["0"] = 47)
+ assertEquals("0", key)
+ assertEquals(47, val)
}
TestSet({
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-405844.js b/deps/v8/test/mjsunit/harmony/regress/regress-405844.js
index fbe7310d79..3d3561f7a5 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-405844.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-405844.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-proxies
+// Flags: --harmony-proxies --harmony-object-observe
var proxy = Proxy.create({ fix: function() { return {}; } });
Object.preventExtensions(proxy);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4298.js b/deps/v8/test/mjsunit/harmony/regress/regress-4298.js
new file mode 100644
index 0000000000..98e69d1acf
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-4298.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-spread-arrays
+
+var arr = [1, 2, ...[3]];
+assertEquals([1, 2, 3], arr);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4417.js b/deps/v8/test/mjsunit/harmony/regress/regress-4417.js
new file mode 100644
index 0000000000..fb773f5fac
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-4417.js
@@ -0,0 +1,12 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-spread-arrays
+
+var arr = [1, 2, 3];
+assertEquals({arr: [1, 2, 3]}, {arr: [...arr]});
+assertEquals([[1, 2, 3]], [[...arr]]);
+
+assertEquals({arr: [6, 5, [1, 2, 3]]}, {arr: [6, 5, [...arr]]});
+assertEquals([8, 7, [6, 5, [1, 2, 3]]], [8, 7, [6, 5, [...arr]]]);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-513474.js b/deps/v8/test/mjsunit/harmony/regress/regress-513474.js
new file mode 100644
index 0000000000..ec4bc84a30
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-513474.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rest-parameters
+
+(function(...a) { function f() { eval() } })();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-517455.js b/deps/v8/test/mjsunit/harmony/regress/regress-517455.js
new file mode 100644
index 0000000000..a59fa181b7
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-517455.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-destructuring
+
+function f({x = ""}) { eval(x) }
+f({})
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-451770.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-451770.js
index eaf1d1961f..770c8073cf 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-451770.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-451770.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-computed-property-names --harmony-sloppy
+// Flags: --harmony-sloppy
assertThrows(function f() {
var t = { toString: function() { throw new Error(); } };
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-observe-empty-double-array.js b/deps/v8/test/mjsunit/harmony/regress/regress-observe-empty-double-array.js
index 301ece70fe..1460889f45 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-observe-empty-double-array.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-observe-empty-double-array.js
@@ -25,6 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --harmony-object-observe
// Flags: --allow-natives-syntax
//
// Test passes if it does not crash.
diff --git a/deps/v8/test/mjsunit/harmony/simd.js b/deps/v8/test/mjsunit/harmony/simd.js
new file mode 100644
index 0000000000..0c52072646
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/simd.js
@@ -0,0 +1,560 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-simd --harmony-tostring --harmony-reflect
+// Flags: --allow-natives-syntax --expose-natives-as natives --noalways-opt
+
+function lanesForType(typeName) {
+ // The lane count follows the first 'x' in the type name, which begins with
+ // 'float', 'int', or 'bool'.
+ return Number.parseInt(typeName.substr(typeName.indexOf('x') + 1));
+}
+
+
+// Creates an instance that has been zeroed, so it can be used for equality
+// testing.
+function createInstance(type) {
+ // Provide enough parameters for the longest type (currently 16). It's
+ // important that instances be consistent to better test that different SIMD
+ // types can't be compared and are never equal or the same in any sense.
+ return SIMD[type](0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+}
+
+
+function isValidSimdString(string, value, type, lanes) {
+ var simdFn = SIMD[type],
+ parseFn =
+ type.indexOf('Float') === 0 ? Number.parseFloat : Number.parseInt,
+ indexOfOpenParen = string.indexOf('(');
+ // Check prefix (e.g. SIMD.Float32x4.)
+ if (string.substr(0, indexOfOpenParen) !== 'SIMD.' + type)
+ return false;
+ // Remove type name (e.g. SIMD.Float32x4) and open parenthesis.
+ string = string.substr(indexOfOpenParen + 1);
+ var laneStrings = string.split(',');
+ if (laneStrings.length !== lanes)
+ return false;
+ for (var i = 0; i < lanes; i++) {
+ var fromString = parseFn(laneStrings[i]),
+ fromValue = simdFn.extractLane(value, i);
+ if (Math.abs(fromString - fromValue) > Number.EPSILON)
+ return false;
+ }
+ return true;
+}
+
+
+var simdTypeNames = ['Float32x4', 'Int32x4', 'Bool32x4',
+ 'Int16x8', 'Bool16x8',
+ 'Int8x16', 'Bool8x16'];
+
+var nonSimdValues = [347, 1.275, NaN, "string", null, undefined, {},
+ function() {}];
+
+function checkTypeMatrix(type, fn) {
+ // Check against non-SIMD types.
+ nonSimdValues.forEach(fn);
+ // Check against SIMD values of a different type.
+ for (var i = 0; i < simdTypeNames.length; i++) {
+ var otherType = simdTypeNames[i];
+ if (type != otherType) fn(createInstance(otherType));
+ }
+}
+
+
+// Test different forms of constructor calls.
+function TestConstructor(type, lanes) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+
+ assertFalse(Object === simdFn.prototype.constructor)
+ assertFalse(simdFn === Object.prototype.constructor)
+ assertSame(simdFn, simdFn.prototype.constructor)
+
+ assertSame(simdFn, instance.__proto__.constructor)
+ assertSame(simdFn, Object(instance).__proto__.constructor)
+ assertSame(simdFn.prototype, instance.__proto__)
+ assertSame(simdFn.prototype, Object(instance).__proto__)
+}
+
+
+function TestType(type, lanes) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+ var typeofString = type.charAt(0).toLowerCase() + type.slice(1);
+
+ assertEquals(typeofString, typeof instance)
+ assertTrue(typeof instance === typeofString)
+ assertTrue(typeof Object(instance) === 'object')
+ assertEquals(null, %_ClassOf(instance))
+ assertEquals(type, %_ClassOf(Object(instance)))
+}
+
+
+function TestPrototype(type, lanes) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+
+ assertSame(Object.prototype, simdFn.prototype.__proto__)
+ assertSame(simdFn.prototype, instance.__proto__)
+ assertSame(simdFn.prototype, Object(instance).__proto__)
+}
+
+
+function TestValueOf(type, lanes) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+
+ assertTrue(instance === Object(instance).valueOf())
+ assertTrue(instance === instance.valueOf())
+ assertTrue(simdFn.prototype.valueOf.call(Object(instance)) === instance)
+ assertTrue(simdFn.prototype.valueOf.call(instance) === instance)
+}
+
+
+function TestGet(type, lanes) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+
+ assertEquals(undefined, instance.a)
+ assertEquals(undefined, instance["a" + "b"])
+ assertEquals(undefined, instance["" + "1"])
+ assertEquals(undefined, instance[42])
+}
+
+
+function TestToBoolean(type, lanes) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+
+ assertTrue(Boolean(Object(instance)))
+ assertFalse(!Object(instance))
+ assertTrue(Boolean(instance).valueOf())
+ assertFalse(!instance)
+ assertTrue(!!instance)
+ assertTrue(instance && true)
+ assertFalse(!instance && false)
+ assertTrue(!instance || true)
+ assertEquals(1, instance ? 1 : 2)
+ assertEquals(2, !instance ? 1 : 2)
+ if (!instance) assertUnreachable();
+ if (instance) {} else assertUnreachable();
+}
+
+
+function TestToString(type, lanes) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+
+ assertEquals(instance.toString(), String(instance))
+ assertTrue(isValidSimdString(instance.toString(), instance, type, lanes))
+ assertTrue(
+ isValidSimdString(Object(instance).toString(), instance, type, lanes))
+ assertTrue(isValidSimdString(
+ simdFn.prototype.toString.call(instance), instance, type, lanes))
+}
+
+
+function TestToNumber(type, lanes) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+
+ assertThrows(function() { Number(Object(instance)) }, TypeError)
+ assertThrows(function() { +Object(instance) }, TypeError)
+ assertThrows(function() { Number(instance) }, TypeError)
+ assertThrows(function() { instance + 0 }, TypeError)
+}
+
+
+function TestCoercions(type, lanes) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+ // Test that setting a lane to value 'a' results in a lane with value 'b'.
+ function test(a, b) {
+ for (var i = 0; i < lanes; i++) {
+ var ainstance = simdFn.replaceLane(instance, i, a);
+ var lane_value = simdFn.extractLane(ainstance, i);
+ assertSame(b, lane_value);
+ }
+ }
+
+ switch (type) {
+ case 'Float32x4':
+ test(0, 0);
+ test(-0, -0);
+ test(NaN, NaN);
+ test(null, 0);
+ test(undefined, NaN);
+ test("5.25", 5.25);
+ test(Number.MAX_VALUE, Infinity);
+ test(-Number.MAX_VALUE, -Infinity);
+ test(Number.MIN_VALUE, 0);
+ break;
+ case 'Int32x4':
+ test(Infinity, 0);
+ test(-Infinity, 0);
+ test(NaN, 0);
+ test(0, 0);
+ test(-0, 0);
+ test(Number.MIN_VALUE, 0);
+ test(-Number.MIN_VALUE, 0);
+ test(0.1, 0);
+ test(-0.1, 0);
+ test(1, 1);
+ test(1.1, 1);
+ test(-1, -1);
+ test(-1.6, -1);
+ test(2147483647, 2147483647);
+ test(2147483648, -2147483648);
+ test(2147483649, -2147483647);
+ test(4294967295, -1);
+ test(4294967296, 0);
+ test(4294967297, 1);
+ break;
+ case 'Int16x8':
+ test(Infinity, 0);
+ test(-Infinity, 0);
+ test(NaN, 0);
+ test(0, 0);
+ test(-0, 0);
+ test(Number.MIN_VALUE, 0);
+ test(-Number.MIN_VALUE, 0);
+ test(0.1, 0);
+ test(-0.1, 0);
+ test(1, 1);
+ test(1.1, 1);
+ test(-1, -1);
+ test(-1.6, -1);
+ test(32767, 32767);
+ test(32768, -32768);
+ test(32769, -32767);
+ test(65535, -1);
+ test(65536, 0);
+ test(65537, 1);
+ break;
+ case 'Int8x16':
+ test(Infinity, 0);
+ test(-Infinity, 0);
+ test(NaN, 0);
+ test(0, 0);
+ test(-0, 0);
+ test(Number.MIN_VALUE, 0);
+ test(-Number.MIN_VALUE, 0);
+ test(0.1, 0);
+ test(-0.1, 0);
+ test(1, 1);
+ test(1.1, 1);
+ test(-1, -1);
+ test(-1.6, -1);
+ test(127, 127);
+ test(128, -128);
+ test(129, -127);
+ test(255, -1);
+ test(256, 0);
+ test(257, 1);
+ break;
+ case 'Bool32x4':
+ case 'Bool16x8':
+ case 'Bool8x16':
+ test(true, true);
+ test(false, false);
+ test(0, false);
+ test(1, true);
+ test(0.1, true);
+ test(NaN, false);
+ test(null, false);
+ test("", false);
+ test("false", true);
+ break;
+ }
+}
+
+
+function TestEquality(type, lanes) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+
+ // Every SIMD value should equal itself, and non-strictly equal its wrapper.
+ assertSame(instance, instance)
+ assertEquals(instance, instance)
+ assertTrue(Object.is(instance, instance))
+ assertTrue(instance === instance)
+ assertTrue(instance == instance)
+ assertFalse(instance === Object(instance))
+ assertFalse(Object(instance) === instance)
+ assertFalse(instance == Object(instance))
+ assertFalse(Object(instance) == instance)
+ assertTrue(instance === instance.valueOf())
+ assertTrue(instance.valueOf() === instance)
+ assertTrue(instance == instance.valueOf())
+ assertTrue(instance.valueOf() == instance)
+ assertFalse(Object(instance) === Object(instance))
+ assertEquals(Object(instance).valueOf(), Object(instance).valueOf())
+
+ function notEqual(other) {
+ assertFalse(instance === other)
+ assertFalse(other === instance)
+ assertFalse(instance == other)
+ assertFalse(other == instance)
+ }
+
+ // SIMD values should not be equal to instances of different types.
+ checkTypeMatrix(type, function(other) {
+ assertFalse(instance === other)
+ assertFalse(other === instance)
+ assertFalse(instance == other)
+ assertFalse(other == instance)
+ });
+
+ // Test that f(a, b) is the same as f(SIMD(a), SIMD(b)) for equality and
+ // strict equality, at every lane.
+ function test(a, b) {
+ for (var i = 0; i < lanes; i++) {
+ var aval = simdFn.replaceLane(instance, i, a);
+ var bval = simdFn.replaceLane(instance, i, b);
+ assertSame(a == b, aval == bval);
+ assertSame(a === b, aval === bval);
+ }
+ }
+
+ switch (type) {
+ case 'Float32x4':
+ test(1, 2.5);
+ test(1, 1);
+ test(0, 0);
+ test(-0, +0);
+ test(+0, -0);
+ test(-0, -0);
+ test(0, NaN);
+ test(NaN, NaN);
+ break;
+ case 'Int32x4':
+ case 'Int16x8':
+ case 'Int8x16':
+ test(1, 2);
+ test(1, 1);
+ test(1, -1);
+ break;
+ case 'Bool32x4':
+ case 'Bool16x8':
+ case 'Bool8x16':
+ test(true, false);
+ test(false, true);
+ break;
+ }
+}
+
+
+function TestSameValue(type, lanes) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+ var sameValue = natives.$sameValue;
+ var sameValueZero = natives.$sameValueZero;
+
+ // SIMD values should not be the same as instances of different types.
+ checkTypeMatrix(type, function(other) {
+ assertFalse(sameValue(instance, other));
+ assertFalse(sameValueZero(instance, other));
+ });
+
+ // Test that f(a, b) is the same as f(SIMD(a), SIMD(b)) for sameValue and
+ // sameValueZero, at every lane.
+ function test(a, b) {
+ for (var i = 0; i < lanes; i++) {
+ var aval = simdFn.replaceLane(instance, i, a);
+ var bval = simdFn.replaceLane(instance, i, b);
+ assertSame(sameValue(a, b), sameValue(aval, bval));
+ assertSame(sameValueZero(a, b), sameValueZero(aval, bval));
+ }
+ }
+
+ switch (type) {
+ case 'Float32x4':
+ test(1, 2.5);
+ test(1, 1);
+ test(0, 0);
+ test(-0, +0);
+ test(+0, -0);
+ test(-0, -0);
+ test(0, NaN);
+ test(NaN, NaN);
+ break;
+ case 'Int32x4':
+ case 'Int16x8':
+ case 'Int8x16':
+ test(1, 2);
+ test(1, 1);
+ test(1, -1);
+ break;
+ case 'Bool32x4':
+ case 'Bool16x8':
+ case 'Bool8x16':
+ test(true, false);
+ test(false, true);
+ break;
+ }
+}
+
+
+function TestComparison(type, lanes) {
+ var simdFn = SIMD[type];
+ var a = createInstance(type), b = createInstance(type);
+
+ function compare(other) {
+ var throwFuncs = [
+ function lt() { a < b; },
+ function gt() { a > b; },
+ function le() { a <= b; },
+ function ge() { a >= b; },
+ function lt_same() { a < a; },
+ function gt_same() { a > a; },
+ function le_same() { a <= a; },
+ function ge_same() { a >= a; },
+ ];
+
+ for (var f of throwFuncs) {
+ assertThrows(f, TypeError);
+ %OptimizeFunctionOnNextCall(f);
+ assertThrows(f, TypeError);
+ assertThrows(f, TypeError);
+ }
+ }
+
+ // Test comparison against the same SIMD type.
+ compare(b);
+ // Test comparison against other types.
+ checkTypeMatrix(type, compare);
+}
+
+
+// Test SIMD value wrapping/boxing over non-builtins.
+function TestCall(type, lanes) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+ simdFn.prototype.getThisProto = function () {
+ return Object.getPrototypeOf(this);
+ }
+ assertTrue(instance.getThisProto() === simdFn.prototype)
+}
+
+
+function TestAsSetKey(type, lanes, set) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+
+ function test(set, key) {
+ assertFalse(set.has(key));
+ assertFalse(set.delete(key));
+ if (!(set instanceof WeakSet)) {
+ assertSame(set, set.add(key));
+ assertTrue(set.has(key));
+ assertTrue(set.delete(key));
+ } else {
+ // SIMD values can't be used as keys in WeakSets.
+ assertThrows(function() { set.add(key) });
+ }
+ assertFalse(set.has(key));
+ assertFalse(set.delete(key));
+ assertFalse(set.has(key));
+ }
+
+ test(set, instance);
+}
+
+
+function TestAsMapKey(type, lanes, map) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+
+ function test(map, key, value) {
+ assertFalse(map.has(key));
+ assertSame(undefined, map.get(key));
+ assertFalse(map.delete(key));
+ if (!(map instanceof WeakMap)) {
+ assertSame(map, map.set(key, value));
+ assertSame(value, map.get(key));
+ assertTrue(map.has(key));
+ assertTrue(map.delete(key));
+ } else {
+ // SIMD values can't be used as keys in WeakMaps.
+ assertThrows(function() { map.set(key, value) });
+ }
+ assertFalse(map.has(key));
+ assertSame(undefined, map.get(key));
+ assertFalse(map.delete(key));
+ assertFalse(map.has(key));
+ assertSame(undefined, map.get(key));
+ }
+
+ test(map, instance, {});
+}
+
+
+// Test SIMD type with Harmony reflect-apply.
+function TestReflectApply(type) {
+ var simdFn = SIMD[type];
+ var instance = createInstance(type);
+
+ function returnThis() { return this; }
+ function returnThisStrict() { 'use strict'; return this; }
+ function noop() {}
+ function noopStrict() { 'use strict'; }
+ var R = void 0;
+
+ assertSame(SIMD[type].prototype,
+ Object.getPrototypeOf(
+ Reflect.apply(returnThis, instance, [])));
+ assertSame(instance, Reflect.apply(returnThisStrict, instance, []));
+
+ assertThrows(
+ function() { 'use strict'; Reflect.apply(instance); }, TypeError);
+ assertThrows(
+ function() { Reflect.apply(instance); }, TypeError);
+ assertThrows(
+ function() { Reflect.apply(noopStrict, R, instance); }, TypeError);
+ assertThrows(
+ function() { Reflect.apply(noop, R, instance); }, TypeError);
+}
+
+
+function TestSIMDTypes() {
+ for (var i = 0; i < simdTypeNames.length; ++i) {
+ var type = simdTypeNames[i],
+ lanes = lanesForType(type);
+ TestConstructor(type, lanes);
+ TestType(type, lanes);
+ TestPrototype(type, lanes);
+ TestValueOf(type, lanes);
+ TestGet(type, lanes);
+ TestToBoolean(type, lanes);
+ TestToString(type, lanes);
+ TestToNumber(type, lanes);
+ TestCoercions(type, lanes);
+ TestEquality(type, lanes);
+ TestSameValue(type, lanes);
+ TestComparison(type, lanes);
+ TestCall(type, lanes);
+ TestAsSetKey(type, lanes, new Set);
+ TestAsSetKey(type, lanes, new WeakSet);
+ TestAsMapKey(type, lanes, new Map);
+ TestAsMapKey(type, lanes, new WeakMap);
+ TestReflectApply(type);
+ }
+}
+TestSIMDTypes();
+
+// Tests for the global SIMD object.
+function TestSIMDObject() {
+ assertSame(typeof SIMD, 'object');
+ assertSame(SIMD.constructor, Object);
+ assertSame(Object.getPrototypeOf(SIMD), Object.prototype);
+ assertSame(SIMD + "", "[object SIMD]");
+ // The SIMD object is mutable.
+ SIMD.foo = "foo";
+ assertSame(SIMD.foo, "foo");
+ delete SIMD.foo;
+ delete SIMD.Bool8x16;
+ assertSame(SIMD.Bool8x16, undefined);
+}
+TestSIMDObject()
diff --git a/deps/v8/test/mjsunit/harmony/super.js b/deps/v8/test/mjsunit/harmony/super.js
index 21b31d96c9..601addaa0e 100644
--- a/deps/v8/test/mjsunit/harmony/super.js
+++ b/deps/v8/test/mjsunit/harmony/super.js
@@ -3,7 +3,8 @@
// found in the LICENSE file.
// Flags: --harmony-arrow-functions --allow-natives-syntax
-// Flags: --harmony-spreadcalls
+// Flags: --harmony-spreadcalls --harmony-destructuring
+// Flags: --harmony-rest-parameters --harmony-sloppy
(function TestSuperNamedLoads() {
function Base() { }
@@ -1979,7 +1980,8 @@ TestKeyedSetterCreatingOwnPropertiesNonConfigurable(42, 43, 44);
class Derived extends Base {
constructor() {
- super();
+ let r = super();
+ assertEquals(this, r);
derivedCalled++;
}
}
@@ -1995,7 +1997,8 @@ TestKeyedSetterCreatingOwnPropertiesNonConfigurable(42, 43, 44);
class DerivedDerived extends Derived {
constructor() {
- super();
+ let r = super();
+ assertEquals(this, r);
derivedDerivedCalled++;
}
}
@@ -2015,7 +2018,8 @@ TestKeyedSetterCreatingOwnPropertiesNonConfigurable(42, 43, 44);
}
class Derived2 extends Base2 {
constructor(v1, v2) {
- super(v1);
+ let r = super(v1);
+ assertEquals(this, r);
this.fromDerived = v2;
}
}
@@ -2119,6 +2123,34 @@ TestKeyedSetterCreatingOwnPropertiesNonConfigurable(42, 43, 44);
})();
+(function TestSuperInOtherScopes() {
+ var p = {x: 99};
+ var o0 = {__proto__: p, f() { return eval("'use strict'; super.x") }};
+ assertEquals(p.x, o0.f());
+ var o1 = {__proto__: p, f() { with ({}) return super.x }};
+ assertEquals(p.x, o1.f());
+ var o2 = {__proto__: p, f({a}) { return super.x }};
+ assertEquals(p.x, o2.f({}));
+ var o3 = {__proto__: p, f(...a) { return super.x }};
+ assertEquals(p.x, o3.f());
+ var o4 = {__proto__: p, f() { 'use strict'; { let x; return super.x } }};
+ assertEquals(p.x, o4.f());
+})();
+
+
+(function TestSuperCallInOtherScopes() {
+ class C {constructor() { this.x = 99 }}
+ class D0 extends C {constructor() { eval("'use strict'; super()") }}
+ assertEquals(99, (new D0).x);
+ class D2 extends C {constructor({a}) { super() }}
+ assertEquals(99, (new D2({})).x);
+ class D3 extends C {constructor(...a) { super() }}
+ assertEquals(99, (new D3()).x);
+ class D4 extends C {constructor() { { let x; super() } }}
+ assertEquals(99, (new D4).x);
+})();
+
+
(function TestSuperCallInEval() {
'use strict';
class Base {
@@ -2128,7 +2160,8 @@ TestKeyedSetterCreatingOwnPropertiesNonConfigurable(42, 43, 44);
}
class Derived extends Base {
constructor(x) {
- eval('super(x)');
+ let r = eval('super(x)');
+ assertEquals(this, r);
}
}
let d = new Derived(42);
@@ -2145,7 +2178,8 @@ TestKeyedSetterCreatingOwnPropertiesNonConfigurable(42, 43, 44);
}
class Derived extends Base {
constructor(x) {
- (() => super(x))();
+ let r = (() => super(x))();
+ assertEquals(this, r);
}
}
let d = new Derived(42);
@@ -2181,6 +2215,47 @@ TestKeyedSetterCreatingOwnPropertiesNonConfigurable(42, 43, 44);
})();
+(function TestSuperCallInLoop() {
+ 'use strict';
+ class Base {
+ constructor(x) {
+ this.x = x;
+ }
+ }
+ class Derived extends Base {
+ constructor(x, n) {
+ for (var i = 0; i < n; ++i) {
+ super(x);
+ }
+ }
+ }
+
+ let o = new Derived(23, 1);
+ assertEquals(23, o.x);
+ assertInstanceof(o, Derived);
+
+ assertThrows("new Derived(42, 0)", ReferenceError);
+ assertThrows("new Derived(65, 2)", ReferenceError);
+})();
+
+
+(function TestSuperCallReentrant() {
+ 'use strict';
+ class Base {
+ constructor(fun) {
+ this.x = fun();
+ }
+ }
+ class Derived extends Base {
+ constructor(x) {
+ let f = () => super(() => x)
+ super(f);
+ }
+ }
+ assertThrows("new Derived(23)", ReferenceError);
+})();
+
+
(function TestSuperCallSpreadInEval() {
'use strict';
class Base {
@@ -2190,7 +2265,8 @@ TestKeyedSetterCreatingOwnPropertiesNonConfigurable(42, 43, 44);
}
class Derived extends Base {
constructor(x) {
- eval('super(...[x])');
+ let r = eval('super(...[x])');
+ assertEquals(this, r);
}
}
let d = new Derived(42);
@@ -2207,7 +2283,8 @@ TestKeyedSetterCreatingOwnPropertiesNonConfigurable(42, 43, 44);
}
class Derived extends Base {
constructor(x) {
- (() => super(...[x]))();
+ let r = (() => super(...[x]))();
+ assertEquals(this, r);
}
}
let d = new Derived(42);
diff --git a/deps/v8/test/mjsunit/harmony/typed-array-includes.js b/deps/v8/test/mjsunit/harmony/typed-array-includes.js
new file mode 100644
index 0000000000..017a4301ee
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/typed-array-includes.js
@@ -0,0 +1,203 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-array-includes
+
+// Largely ported from
+// https://github.com/tc39/Array.prototype.includes/tree/master/test/built-ins/TypedArray/prototype/includes
+// using https://www.npmjs.org/package/test262-to-mjsunit with further edits
+
+
+function testTypedArrays(callback) {
+ [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array
+ ]
+ .forEach(callback);
+}
+
+testTypedArrays.floatOnly = function (callback) {
+ [Float32Array, Float64Array].forEach(callback);
+};
+
+
+// %TypedArray%.prototype.includes throws a TypeError when used on non-typed
+// arrays
+(function() {
+ var taIncludes = Uint8Array.prototype.includes;
+
+ assertThrows(function() {
+ taIncludes.call({
+ length: 2,
+ 0: 1,
+ 1: 2
+ }, 2);
+ }, TypeError);
+
+ assertThrows(function() {
+ taIncludes.call([1, 2, 3], 2);
+ }, TypeError);
+
+ assertThrows(function() {
+ taIncludes.call(null, 2);
+ }, TypeError);
+
+ assertThrows(function() {
+ taIncludes.call(undefined, 2);
+ }, TypeError);
+})();
+
+
+// %TypedArray%.prototype.includes should terminate if ToNumber ends up being
+// called on a symbol fromIndex
+(function() {
+ testTypedArrays(function(TypedArrayConstructor) {
+ var ta = new TypedArrayConstructor([1, 2, 3]);
+
+ assertThrows(function() {
+ ta.includes(2, Symbol());
+ }, TypeError);
+ });
+})();
+
+
+// %TypedArray%.prototype.includes should terminate if an exception occurs
+// converting the fromIndex to a number
+(function() {
+ function Test262Error() {}
+
+ var fromIndex = {
+ valueOf: function() {
+ throw new Test262Error();
+ }
+ };
+
+ testTypedArrays(function(TypedArrayConstructor) {
+ var ta = new TypedArrayConstructor([1, 2, 3]);
+
+ assertThrows(function() {
+ ta.includes(2, fromIndex);
+ }, Test262Error);
+ });
+})();
+
+
+// %TypedArray%.prototype.includes should search the whole array, as the
+// optional second argument fromIndex defaults to 0
+(function() {
+ testTypedArrays(function(TypedArrayConstructor) {
+ var ta = new TypedArrayConstructor([1, 2, 3]);
+ assertTrue(ta.includes(1));
+ assertTrue(ta.includes(2));
+ assertTrue(ta.includes(3));
+ });
+})();
+
+
+// %TypedArray%.prototype.includes returns false if fromIndex is greater than or
+// equal to the length of the array
+(function() {
+ testTypedArrays(function(TypedArrayConstructor) {
+ var ta = new TypedArrayConstructor([1, 2]);
+ assertFalse(ta.includes(2, 3));
+ assertFalse(ta.includes(2, 2));
+ });
+})();
+
+
+// %TypedArray%.prototype.includes searches the whole array if the computed
+// index from the given negative fromIndex argument is less than 0
+(function() {
+ testTypedArrays(function(TypedArrayConstructor) {
+ var ta = new TypedArrayConstructor([1, 3]);
+ assertTrue(ta.includes(1, -4));
+ assertTrue(ta.includes(1, -4));
+ });
+})();
+
+
+// %TypedArray%.prototype.includes should use a negative value as the offset
+// from the end of the array to compute fromIndex
+(function() {
+ testTypedArrays(function(TypedArrayConstructor) {
+ var ta = new TypedArrayConstructor([12, 13]);
+ assertTrue(ta.includes(13, -1));
+ assertFalse(ta.includes(12, -1));
+ assertTrue(ta.includes(12, -2));
+ });
+})();
+
+
+// %TypedArray%.prototype.includes converts its fromIndex parameter to an
+// integer
+(function() {
+ testTypedArrays(function(TypedArrayConstructor) {
+ var ta = new TypedArrayConstructor([1, 2, 3]);
+ assertFalse(ta.includes(1, 3.3));
+ assertTrue(ta.includes(1, -Infinity));
+ assertTrue(ta.includes(3, 2.9));
+ assertTrue(ta.includes(3, NaN));
+
+ var numberLike = {
+ valueOf: function() {
+ return 2;
+ }
+ };
+
+ assertFalse(ta.includes(1, numberLike));
+ assertFalse(ta.includes(1, "2"));
+ assertTrue(ta.includes(3, numberLike));
+ assertTrue(ta.includes(3, "2"));
+ });
+})();
+
+
+// %TypedArray%.prototype.includes should have length 1
+(function() {
+ assertEquals(1, Uint8Array.prototype.includes.length);
+})();
+
+
+// %TypedArray%.prototype.includes should have name property with value
+// 'includes'
+(function() {
+ assertEquals("includes", Uint8Array.prototype.includes.name);
+})();
+
+
+// %TypedArray%.prototype.includes should always return false on zero-length
+// typed arrays
+(function() {
+ testTypedArrays(function(TypedArrayConstructor) {
+ var ta = new TypedArrayConstructor([]);
+ assertFalse(ta.includes(2));
+ assertFalse(ta.includes());
+ assertFalse(ta.includes(undefined));
+ assertFalse(ta.includes(NaN));
+ });
+})();
+
+
+// %TypedArray%.prototype.includes should use the SameValueZero algorithm to
+// compare
+(function() {
+ testTypedArrays.floatOnly(function(FloatArrayConstructor) {
+ assertTrue(new FloatArrayConstructor([1, 2, NaN]).includes(NaN));
+ assertTrue(new FloatArrayConstructor([1, 2, -0]).includes(+0));
+ assertTrue(new FloatArrayConstructor([1, 2, -0]).includes(-0));
+ assertTrue(new FloatArrayConstructor([1, 2, +0]).includes(-0));
+ assertTrue(new FloatArrayConstructor([1, 2, +0]).includes(+0));
+ assertFalse(new FloatArrayConstructor([1, 2, -Infinity]).includes(+Infinity));
+ assertTrue(new FloatArrayConstructor([1, 2, -Infinity]).includes(-Infinity));
+ assertFalse(new FloatArrayConstructor([1, 2, +Infinity]).includes(-Infinity));
+ assertTrue(new FloatArrayConstructor([1, 2, +Infinity]).includes(+Infinity));
+ });
+})();
diff --git a/deps/v8/test/mjsunit/invalid-lhs.js b/deps/v8/test/mjsunit/invalid-lhs.js
index 52ee89582a..d28dc9ccf8 100644
--- a/deps/v8/test/mjsunit/invalid-lhs.js
+++ b/deps/v8/test/mjsunit/invalid-lhs.js
@@ -50,9 +50,9 @@ assertDoesNotThrow("if (false) ++(eval('12'))", ReferenceError);
assertDoesNotThrow("if (false) (eval('12'))++", ReferenceError);
// For in:
-assertThrows("for (12 in [1]) print(12);", ReferenceError);
+assertThrows("for (12 in [1]) print(12);", SyntaxError);
assertThrows("for (eval('var x') in [1]) print(12);", ReferenceError);
-assertThrows("if (false) for (12 in [1]) print(12);", ReferenceError);
+assertThrows("if (false) for (12 in [1]) print(12);", SyntaxError);
assertDoesNotThrow("if (false) for (eval('0') in [1]) print(12);", ReferenceError);
// For:
@@ -64,7 +64,7 @@ assertDoesNotThrow("if (false) for (eval('var x') = 1;;) print(12);", ReferenceE
// Assignments to 'this'.
assertThrows("this = 42", ReferenceError);
assertThrows("function f() { this = 12; }", ReferenceError);
-assertThrows("for (this in {x:3, y:4, z:5}) ;", ReferenceError);
+assertThrows("for (this in {x:3, y:4, z:5}) ;", SyntaxError);
assertThrows("for (this = 0;;) ;", ReferenceError);
assertThrows("this++", ReferenceError);
assertThrows("++this", ReferenceError);
diff --git a/deps/v8/test/mjsunit/messages.js b/deps/v8/test/mjsunit/messages.js
index c30e59f3e1..45443c75d1 100644
--- a/deps/v8/test/mjsunit/messages.js
+++ b/deps/v8/test/mjsunit/messages.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --stack-size=100 --harmony --harmony-reflect --harmony-arrays
-// Flags: --harmony-regexps --strong-mode
+// Flags: --stack-size=100 --harmony --harmony-reflect --harmony-regexps
+// Flags: --harmony-simd --strong-mode
function test(f, expected, type) {
try {
@@ -308,11 +308,6 @@ test(function() {
"a" + 1;
}, "In strong mode, implicit conversions are deprecated", TypeError);
-// kSymbolToPrimitive
-test(function() {
- 1 + Object(Symbol());
-}, "Cannot convert a Symbol wrapper object to a primitive value", TypeError);
-
// kSymbolToString
test(function() {
"" + Symbol();
@@ -323,6 +318,11 @@ test(function() {
1 + Symbol();
}, "Cannot convert a Symbol value to a number", TypeError);
+// kSimdToNumber
+test(function() {
+ 1 + SIMD.Float32x4(1, 2, 3, 4);
+}, "Cannot convert a SIMD value to a number", TypeError);
+
// kUndefinedOrNullToObject
test(function() {
Array.prototype.toString.call(null);
diff --git a/deps/v8/test/mjsunit/migrations.js b/deps/v8/test/mjsunit/migrations.js
index 288bc61031..a18d884059 100644
--- a/deps/v8/test/mjsunit/migrations.js
+++ b/deps/v8/test/mjsunit/migrations.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-ayle license that can be
// found in the LICENSE file.
+// Flags: --harmony-object-observe
// Flags: --allow-natives-syntax --track-fields --expose-gc
var global = Function('return this')();
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 6c6a9b9755..0b333f10c9 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -132,6 +132,7 @@
# TODO(jarin/mstarzinger): Investigate debugger issues with TurboFan.
'debug-evaluate-const': [PASS, NO_VARIANTS],
'debug-evaluate-locals': [PASS, NO_VARIANTS],
+ 'debug-evaluate-locals-capturing': [PASS, NO_VARIANTS],
'debug-liveedit-check-stack': [PASS, NO_VARIANTS], # only in no-snap mode.
'debug-liveedit-double-call': [PASS, NO_VARIANTS],
'debug-set-variable-value': [PASS, NO_VARIANTS],
@@ -244,6 +245,17 @@
# BUG(v8:3838).
'regress/regress-3116': [PASS, ['isolates', FLAKY]],
+
+ # BUG(chromium:508074). Remove this once the issue is fixed.
+ 'harmony/arrow-rest-params': [PASS, NO_VARIANTS],
+ 'harmony/rest-params': [PASS, ['no_snap == True', NO_VARIANTS]],
+
+ # BUG(v8:4378).
+ 'regress/regress-crbug-501711': [PASS, ['isolates', SKIP]],
+ 'regress/regress-4279': [PASS, ['isolates', SKIP]],
+
+ # BUG(chromium:518748)
+ 'regress/regress-crbug-518748': [SKIP],
}], # ALWAYS
['novfp3 == True', {
@@ -301,11 +313,15 @@
# Issue 3723.
'regress/regress-3717': [SKIP],
- # Issue 478788.
- 'es7/object-observe': [SKIP],
# BUG(v8:4237)
'regress/regress-3976': [SKIP],
+
+ # BUG(v8:4359)
+ 'strong/load-proxy': [SKIP],
+
+ # BUG(v8:4381)
+ 'for-in-opt': [PASS, FAIL],
}], # 'gc_stress == True'
##############################################################################
@@ -647,7 +663,6 @@
'assert-opt-and-deopt': [SKIP],
'never-optimize': [SKIP],
'regress/regress-2185-2': [SKIP],
- 'harmony/object-observe': [SKIP],
'readonly': [SKIP],
'array-feedback': [SKIP],
diff --git a/deps/v8/test/mjsunit/opt-elements-kind.js b/deps/v8/test/mjsunit/opt-elements-kind.js
index 5f4f437299..515305a928 100644
--- a/deps/v8/test/mjsunit/opt-elements-kind.js
+++ b/deps/v8/test/mjsunit/opt-elements-kind.js
@@ -33,19 +33,19 @@
// Flags: --stress-runs=2
var elements_kind = {
- fast_smi_only : 'fast smi only elements',
- fast : 'fast elements',
- fast_double : 'fast double elements',
- dictionary : 'dictionary elements',
- external_byte : 'external byte elements',
- external_unsigned_byte : 'external unsigned byte elements',
- external_short : 'external short elements',
- external_unsigned_short : 'external unsigned short elements',
- external_int : 'external int elements',
- external_unsigned_int : 'external unsigned int elements',
- external_float : 'external float elements',
- external_double : 'external double elements',
- external_pixel : 'external pixel elements'
+ fast_smi_only : 'fast smi only elements',
+ fast : 'fast elements',
+ fast_double : 'fast double elements',
+ dictionary : 'dictionary elements',
+ fixed_int8 : 'fixed int8 elements',
+ fixed_uint8 : 'fixed uint8 elements',
+ fixed_int16 : 'fixed int16 elements',
+ fixed_uint16 : 'fixed uint16 elements',
+ fixed_int32 : 'fixed int32 elements',
+ fixed_uint32 : 'fixed uint32 elements',
+ fixed_float32 : 'fixed float32 elements',
+ fixed_float64 : 'fixed float64 elements',
+ fixed_uint8_clamped : 'fixed uint8_clamped elements'
}
function getKind(obj) {
@@ -53,34 +53,33 @@ function getKind(obj) {
if (%HasFastObjectElements(obj)) return elements_kind.fast;
if (%HasFastDoubleElements(obj)) return elements_kind.fast_double;
if (%HasDictionaryElements(obj)) return elements_kind.dictionary;
- // Every external kind is also an external array.
- assertTrue(%HasExternalArrayElements(obj));
- if (%HasExternalByteElements(obj)) {
- return elements_kind.external_byte;
+
+ if (%HasFixedInt8Elements(obj)) {
+ return elements_kind.fixed_int8;
}
- if (%HasExternalUnsignedByteElements(obj)) {
- return elements_kind.external_unsigned_byte;
+ if (%HasFixedUint8Elements(obj)) {
+ return elements_kind.fixed_uint8;
}
- if (%HasExternalShortElements(obj)) {
- return elements_kind.external_short;
+ if (%HasFixedInt16Elements(obj)) {
+ return elements_kind.fixed_int16;
}
- if (%HasExternalUnsignedShortElements(obj)) {
- return elements_kind.external_unsigned_short;
+ if (%HasFixedUint16Elements(obj)) {
+ return elements_kind.fixed_uint16;
}
- if (%HasExternalIntElements(obj)) {
- return elements_kind.external_int;
+ if (%HasFixedInt32Elements(obj)) {
+ return elements_kind.fixed_int32;
}
- if (%HasExternalUnsignedIntElements(obj)) {
- return elements_kind.external_unsigned_int;
+ if (%HasFixedUint32Elements(obj)) {
+ return elements_kind.fixed_uint32;
}
- if (%HasExternalFloatElements(obj)) {
- return elements_kind.external_float;
+ if (%HasFixedFloat32Elements(obj)) {
+ return elements_kind.fixed_float32;
}
- if (%HasExternalDoubleElements(obj)) {
- return elements_kind.external_double;
+ if (%HasFixedFloat64Elements(obj)) {
+ return elements_kind.fixed_float64;
}
- if (%HasExternalPixelElements(obj)) {
- return elements_kind.external_pixel;
+ if (%HasFixedUint8ClampedElements(obj)) {
+ return elements_kind.fixed_uint8_clamped;
}
}
diff --git a/deps/v8/test/mjsunit/osr-elements-kind.js b/deps/v8/test/mjsunit/osr-elements-kind.js
index 389b6dac6f..bd15ef37e4 100644
--- a/deps/v8/test/mjsunit/osr-elements-kind.js
+++ b/deps/v8/test/mjsunit/osr-elements-kind.js
@@ -33,19 +33,19 @@
// Flags: --stress-runs=2
var elements_kind = {
- fast_smi_only : 'fast smi only elements',
- fast : 'fast elements',
- fast_double : 'fast double elements',
- dictionary : 'dictionary elements',
- external_byte : 'external byte elements',
- external_unsigned_byte : 'external unsigned byte elements',
- external_short : 'external short elements',
- external_unsigned_short : 'external unsigned short elements',
- external_int : 'external int elements',
- external_unsigned_int : 'external unsigned int elements',
- external_float : 'external float elements',
- external_double : 'external double elements',
- external_pixel : 'external pixel elements'
+ fast_smi_only : 'fast smi only elements',
+ fast : 'fast elements',
+ fast_double : 'fast double elements',
+ dictionary : 'dictionary elements',
+ fixed_int8 : 'fixed int8 elements',
+ fixed_uint8 : 'fixed uint8 elements',
+ fixed_int16 : 'fixed int16 elements',
+ fixed_uint16 : 'fixed uint16 elements',
+ fixed_int32 : 'fixed int32 elements',
+ fixed_uint32 : 'fixed uint32 elements',
+ fixed_float32 : 'fixed float32 elements',
+ fixed_float64 : 'fixed float64 elements',
+ fixed_uint8_clamped : 'fixed uint8_clamped elements'
}
function getKind(obj) {
@@ -53,34 +53,33 @@ function getKind(obj) {
if (%HasFastObjectElements(obj)) return elements_kind.fast;
if (%HasFastDoubleElements(obj)) return elements_kind.fast_double;
if (%HasDictionaryElements(obj)) return elements_kind.dictionary;
- // Every external kind is also an external array.
- assertTrue(%HasExternalArrayElements(obj));
- if (%HasExternalByteElements(obj)) {
- return elements_kind.external_byte;
+
+ if (%HasFixedInt8Elements(obj)) {
+ return elements_kind.fixed_int8;
}
- if (%HasExternalUnsignedByteElements(obj)) {
- return elements_kind.external_unsigned_byte;
+ if (%HasFixedUint8Elements(obj)) {
+ return elements_kind.fixed_uint8;
}
- if (%HasExternalShortElements(obj)) {
- return elements_kind.external_short;
+ if (%HasFixedInt16Elements(obj)) {
+ return elements_kind.fixed_int16;
}
- if (%HasExternalUnsignedShortElements(obj)) {
- return elements_kind.external_unsigned_short;
+ if (%HasFixedUint16Elements(obj)) {
+ return elements_kind.fixed_uint16;
}
- if (%HasExternalIntElements(obj)) {
- return elements_kind.external_int;
+ if (%HasFixedInt32Elements(obj)) {
+ return elements_kind.fixed_int32;
}
- if (%HasExternalUnsignedIntElements(obj)) {
- return elements_kind.external_unsigned_int;
+ if (%HasFixedUint32Elements(obj)) {
+ return elements_kind.fixed_uint32;
}
- if (%HasExternalFloatElements(obj)) {
- return elements_kind.external_float;
+ if (%HasFixedFloat32Elements(obj)) {
+ return elements_kind.fixed_float32;
}
- if (%HasExternalDoubleElements(obj)) {
- return elements_kind.external_double;
+ if (%HasFixedFloat64Elements(obj)) {
+ return elements_kind.fixed_float64;
}
- if (%HasExternalPixelElements(obj)) {
- return elements_kind.external_pixel;
+ if (%HasFixedUint8ClampedElements(obj)) {
+ return elements_kind.fixed_uint8_clamped;
}
}
diff --git a/deps/v8/test/mjsunit/primitive-keyed-access.js b/deps/v8/test/mjsunit/primitive-keyed-access.js
new file mode 100644
index 0000000000..c83975a8d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/primitive-keyed-access.js
@@ -0,0 +1,49 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.defineProperty(Number.prototype, "0",
+ { set: function(v) { set = v; }});
+Object.defineProperty(String.prototype, "0",
+ { set: function(v) { set = v; }});
+Object.defineProperty(String.prototype, "3",
+ { set: function(v) { set = v; }});
+
+var set;
+var n = 1;
+set = 0;
+n[0] = 100;
+assertEquals(100, set);
+var s = "bla";
+s[0] = 200;
+assertEquals(100, set);
+s[3] = 300;
+assertEquals(300, set);
+
+assertThrows(function(){"use strict"; var o = "123"; o[1] = 10; });
+assertThrows(function(){"use strict"; var o = ""; o[1] = 10; });
+assertThrows(function(){"use strict"; var o = 1; o[1] = 10; });
+
+assertThrows(function() {
+ "use strict";
+ var sym = Symbol('66');
+ sym.a = 0;
+});
+
+assertThrows(function() {
+ "use strict";
+ var sym = Symbol('66');
+ sym['a' + 'b'] = 0;
+});
+
+assertThrows(function() {
+ "use strict";
+ var sym = Symbol('66');
+ sym[62] = 0;
+});
+
+assertThrows(function() {
+ "use strict";
+ var o = "bla";
+ o["0"] = 1;
+});
diff --git a/deps/v8/test/mjsunit/regress-4399.js b/deps/v8/test/mjsunit/regress-4399.js
new file mode 100644
index 0000000000..a8fdab7d9d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-4399.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+if (this.Worker) {
+ assertThrows(function() { Worker.prototype.constructor("55"); });
+}
diff --git a/deps/v8/test/mjsunit/regress/cross-script-vars.js b/deps/v8/test/mjsunit/regress/cross-script-vars.js
new file mode 100644
index 0000000000..fd235f997b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/cross-script-vars.js
@@ -0,0 +1,575 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function PrintDesc(desc, s) {
+ var json;
+ if (desc) {
+ json = JSON.stringify(desc);
+ } else {
+ json = "<no such property>";
+ }
+ if (s === undefined) {
+ print(json);
+ } else {
+ print(s + ": " + json);
+ }
+}
+
+
+var counters;
+var test_realm;
+var cfg;
+
+
+function GetDescriptor() {
+ var code = 'Object.getOwnPropertyDescriptor(global, "x")';
+ var desc = Realm.eval(test_realm, code);
+// PrintDesc(desc);
+ return desc;
+}
+
+function SetUp() {
+ counters = {};
+ Realm.shared = {counters: counters};
+ test_realm = Realm.create();
+ Realm.eval(test_realm, 'var global = Realm.global(Realm.current());');
+ print("=====================");
+ print("Test realm: " + test_realm);
+ assertEquals(undefined, GetDescriptor());
+}
+
+function TearDown() {
+ Realm.dispose(test_realm);
+ print("OK");
+}
+
+
+function AddStrict(code, cfg) {
+ return cfg.strict ? '"use strict"; ' + code : code;
+}
+
+function ForceMutablePropertyCellType() {
+ Realm.eval(test_realm, 'global.x = {}; global.x = undefined;');
+}
+
+function DeclareVar() {
+ var code = 'var x;';
+ return Realm.eval(test_realm, AddStrict(code, cfg));
+}
+
+function DefineVar(v) {
+ var code = 'var x = ' + v;
+ return Realm.eval(test_realm, AddStrict(code, cfg));
+}
+
+function DefineLoadVar() {
+ var name = 'LoadVar_' + test_realm;
+ var code =
+ 'var x;' +
+ 'function ' + name + '() {' +
+ ' return x;' +
+ '};';
+ return Realm.eval(test_realm, AddStrict(code, cfg));
+}
+
+function LoadVar() {
+ var name = 'LoadVar_' + test_realm;
+ var code =
+ (cfg.optimize ? '%OptimizeFunctionOnNextCall(' + name + ');' : '') +
+ name + '();';
+ return Realm.eval(test_realm, AddStrict(code, cfg));
+}
+
+function DefineStoreVar() {
+ var name = 'StoreVar_' + test_realm;
+ var code = 'var g = (Function("return this"))();' +
+ 'var x;' +
+ 'function ' + name + '(v) {' +
+// ' %DebugPrint(g);' +
+ ' return x = v;' +
+ '};';
+ return Realm.eval(test_realm, AddStrict(code, cfg));
+}
+
+function StoreVar(v) {
+ var name = 'StoreVar_' + test_realm;
+ var code =
+ (cfg.optimize ? '%OptimizeFunctionOnNextCall(' + name + ');' : '') +
+ name + '(' + v + ');';
+ return Realm.eval(test_realm, AddStrict(code, cfg));
+}
+
+// It does 13 iterations which results in 27 loads
+// and 14 stores.
+function LoadStoreLoop() {
+ var code = 'for(var x = 0; x < 13; x++);';
+ return Realm.eval(test_realm, AddStrict(code, cfg));
+}
+
+function DefineRWDataProperty() {
+ var code =
+ 'Object.defineProperty(global, "x", { ' +
+ ' value: 42, ' +
+ ' writable: true, ' +
+ ' enumerable: true, ' +
+ ' configurable: true ' +
+ '});';
+ return Realm.eval(test_realm, AddStrict(code, cfg));
+}
+
+function DefineRODataProperty() {
+ var code =
+ 'Object.defineProperty(global, "x", { ' +
+ ' value: 42, ' +
+ ' writable: false, ' +
+ ' enumerable: true, ' +
+ ' configurable: true ' +
+ '});';
+ return Realm.eval(test_realm, AddStrict(code, cfg));
+}
+
+function SetX_(v) {
+ var code =
+ 'global.x_ = ' + v + '; ';
+ return Realm.eval(test_realm, code);
+}
+
+function DefineRWAccessorProperty() {
+ var code =
+ 'Object.defineProperty(global, "x", {' +
+ ' get: function() { Realm.shared.counters.get_count++; return this.x_; },' +
+ ' set: function(v) { Realm.shared.counters.set_count++; this.x_ = v; },' +
+ ' enumerable: true, configurable: true' +
+ '});';
+ counters.get_count = 0;
+ counters.set_count = 0;
+ return Realm.eval(test_realm, AddStrict(code, cfg));
+}
+
+function DefineROAccessorProperty() {
+ var code =
+ 'Object.defineProperty(global, "x", {' +
+ ' get: function() { Realm.shared.counters.get_count++; return this.x_; },' +
+ ' enumerable: true, configurable: true' +
+ '});';
+ counters.get_count = 0;
+ counters.set_count = 0;
+ return Realm.eval(test_realm, AddStrict(code, cfg));
+}
+
+
+function testSuite(opt_cfg) {
+ //
+ // Non strict.
+ //
+
+ (function() {
+ SetUp();
+ cfg = {optimize: opt_cfg.optimize, strict: false};
+ DeclareVar();
+ DefineLoadVar();
+ DefineStoreVar();
+ assertEquals(undefined, LoadVar());
+ assertEquals(false, GetDescriptor().configurable);
+
+ // Force property cell type to kMutable.
+ DefineVar(undefined);
+ DefineVar(153);
+ assertEquals(false, GetDescriptor().configurable);
+
+ assertEquals(153, LoadVar());
+ assertEquals(113, StoreVar(113));
+ assertEquals(113, LoadVar());
+ LoadStoreLoop();
+ assertEquals(13, LoadVar());
+ TearDown();
+ })();
+
+
+ (function() {
+ SetUp();
+ cfg = {optimize: opt_cfg.optimize, strict: false};
+ ForceMutablePropertyCellType();
+ DefineLoadVar();
+ DefineStoreVar();
+ DefineRWDataProperty();
+ assertEquals(42, LoadVar());
+ assertEquals(true, GetDescriptor().configurable);
+
+ DefineVar(153);
+ assertEquals(true, GetDescriptor().configurable);
+
+ assertEquals(153, LoadVar());
+ assertEquals(113, StoreVar(113));
+ assertEquals(113, LoadVar());
+ LoadStoreLoop();
+ assertEquals(13, LoadVar());
+
+ // Now reconfigure to accessor.
+ DefineRWAccessorProperty();
+ assertEquals(undefined, GetDescriptor().value);
+ assertEquals(true, GetDescriptor().configurable);
+ assertEquals(0, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ assertEquals(undefined, LoadVar());
+ assertEquals(1, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ LoadStoreLoop();
+ assertEquals(28, counters.get_count);
+ assertEquals(14, counters.set_count);
+
+ assertEquals(13, LoadVar());
+ assertEquals(29, counters.get_count);
+ assertEquals(14, counters.set_count);
+
+ TearDown();
+ })();
+
+
+ (function() {
+ SetUp();
+ cfg = {optimize: opt_cfg.optimize, strict: false};
+ ForceMutablePropertyCellType();
+ DefineLoadVar();
+ DefineStoreVar();
+ DefineRODataProperty();
+ assertEquals(42, LoadVar());
+ assertEquals(true, GetDescriptor().configurable);
+
+ DefineVar(153);
+
+ assertEquals(42, LoadVar());
+ assertEquals(113, StoreVar(113));
+ assertEquals(42, LoadVar());
+ LoadStoreLoop();
+ assertEquals(42, LoadVar());
+
+ // Now reconfigure to accessor property.
+ DefineRWAccessorProperty();
+ assertEquals(undefined, GetDescriptor().value);
+ assertEquals(true, GetDescriptor().configurable);
+ assertEquals(0, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ assertEquals(undefined, LoadVar());
+ assertEquals(1, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ LoadStoreLoop();
+ assertEquals(28, counters.get_count);
+ assertEquals(14, counters.set_count);
+
+ assertEquals(13, LoadVar());
+ assertEquals(29, counters.get_count);
+ assertEquals(14, counters.set_count);
+
+ TearDown();
+ })();
+
+
+ (function() {
+ SetUp();
+ cfg = {optimize: opt_cfg.optimize, strict: false};
+ ForceMutablePropertyCellType();
+ DefineLoadVar();
+ DefineStoreVar();
+ DefineRWAccessorProperty();
+ assertEquals(0, counters.get_count);
+ assertEquals(0, counters.set_count);
+ assertEquals(true, GetDescriptor().configurable);
+
+ assertEquals(undefined, LoadVar());
+ assertEquals(1, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ DefineVar(153);
+ assertEquals(true, GetDescriptor().configurable);
+ assertEquals(1, counters.get_count);
+ assertEquals(1, counters.set_count);
+
+ assertEquals(153, LoadVar());
+ assertEquals(2, counters.get_count);
+ assertEquals(1, counters.set_count);
+
+ assertEquals(113, StoreVar(113));
+ assertEquals(2, counters.get_count);
+ assertEquals(2, counters.set_count);
+
+ assertEquals(113, LoadVar());
+ assertEquals(3, counters.get_count);
+ assertEquals(2, counters.set_count);
+
+ LoadStoreLoop();
+ assertEquals(30, counters.get_count);
+ assertEquals(16, counters.set_count);
+
+ assertEquals(13, LoadVar());
+ assertEquals(31, counters.get_count);
+ assertEquals(16, counters.set_count);
+
+ // Now reconfigure to data property.
+ DefineRWDataProperty();
+ assertEquals(42, GetDescriptor().value);
+ assertEquals(42, LoadVar());
+ assertEquals(113, StoreVar(113));
+ assertEquals(31, counters.get_count);
+ assertEquals(16, counters.set_count);
+
+ TearDown();
+ })();
+
+
+ (function() {
+ SetUp();
+ cfg = {optimize: opt_cfg.optimize, strict: false};
+ ForceMutablePropertyCellType();
+ DefineLoadVar();
+ DefineStoreVar();
+ DefineROAccessorProperty();
+ assertEquals(0, counters.get_count);
+ assertEquals(0, counters.set_count);
+ assertEquals(true, GetDescriptor().configurable);
+
+ assertEquals(undefined, LoadVar());
+ assertEquals(1, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ SetX_(42);
+ assertEquals(42, LoadVar());
+ assertEquals(2, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ DefineVar(153);
+ assertEquals(true, GetDescriptor().configurable);
+ assertEquals(2, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ assertEquals(42, LoadVar());
+ assertEquals(3, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ assertEquals(113, StoreVar(113));
+ assertEquals(3, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ assertEquals(42, LoadVar());
+ assertEquals(4, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ LoadStoreLoop();
+ assertEquals(5, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ assertEquals(42, LoadVar());
+ assertEquals(6, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ // Now reconfigure to data property.
+ DefineRWDataProperty();
+ assertEquals(42, GetDescriptor().value);
+ assertEquals(42, LoadVar());
+ assertEquals(113, StoreVar(113));
+ assertEquals(6, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ TearDown();
+ })();
+
+
+ //
+ // Strict.
+ //
+
+ (function() {
+ SetUp();
+ cfg = {optimize: opt_cfg.optimize, strict: true};
+ DeclareVar();
+ DefineLoadVar();
+ DefineStoreVar();
+ assertEquals(undefined, LoadVar());
+ assertEquals(false, GetDescriptor().configurable);
+
+ // Force property cell type to kMutable.
+ DefineVar(undefined);
+ DefineVar(153);
+ assertEquals(false, GetDescriptor().configurable);
+
+ assertEquals(153, LoadVar());
+ assertEquals(113, StoreVar(113));
+ assertEquals(113, LoadVar());
+ LoadStoreLoop();
+ assertEquals(13, LoadVar());
+ TearDown();
+ })();
+
+
+ (function() {
+ SetUp();
+ cfg = {optimize: opt_cfg.optimize, strict: true};
+ ForceMutablePropertyCellType();
+ DefineLoadVar();
+ DefineStoreVar();
+ DefineRWDataProperty();
+ assertEquals(42, LoadVar());
+ assertEquals(true, GetDescriptor().configurable);
+
+ DefineVar(153);
+ assertEquals(true, GetDescriptor().configurable);
+
+ assertEquals(153, LoadVar());
+ assertEquals(113, StoreVar(113));
+ assertEquals(113, LoadVar());
+ LoadStoreLoop();
+ assertEquals(13, LoadVar());
+ TearDown();
+ })();
+
+
+ (function() {
+ SetUp();
+ cfg = {optimize: opt_cfg.optimize, strict: true};
+ ForceMutablePropertyCellType();
+ DefineLoadVar();
+ DefineStoreVar();
+ DefineRWDataProperty();
+ assertEquals(true, GetDescriptor().configurable);
+ assertEquals(true, GetDescriptor().writable);
+ assertEquals(113, StoreVar(113));
+
+ DefineRODataProperty();
+ assertEquals(true, GetDescriptor().configurable);
+ assertEquals(false, GetDescriptor().writable);
+
+ assertEquals(42, LoadVar());
+ assertEquals(true, GetDescriptor().configurable);
+ assertThrows('DefineVar(153)');
+ assertEquals(42, LoadVar());
+ assertThrows('StoreVar(113)');
+ assertThrows('StoreVar(113)');
+ assertEquals(42, LoadVar());
+ assertThrows('StoreVar(42)');
+ assertEquals(42, LoadVar());
+ assertThrows('LoadStoreLoop()');
+ assertEquals(42, LoadVar());
+ TearDown();
+ })();
+
+
+ (function() {
+ SetUp();
+ cfg = {optimize: opt_cfg.optimize, strict: true};
+ ForceMutablePropertyCellType();
+ DefineLoadVar();
+ DefineStoreVar();
+ DefineRWAccessorProperty();
+ assertEquals(0, counters.get_count);
+ assertEquals(0, counters.set_count);
+ assertEquals(true, GetDescriptor().configurable);
+
+ assertEquals(undefined, LoadVar());
+ assertEquals(1, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ DefineVar(153);
+ assertEquals(true, GetDescriptor().configurable);
+ assertEquals(1, counters.get_count);
+ assertEquals(1, counters.set_count);
+
+ assertEquals(153, LoadVar());
+ assertEquals(2, counters.get_count);
+ assertEquals(1, counters.set_count);
+
+ assertEquals(113, StoreVar(113));
+ assertEquals(2, counters.get_count);
+ assertEquals(2, counters.set_count);
+
+ assertEquals(113, LoadVar());
+ assertEquals(3, counters.get_count);
+ assertEquals(2, counters.set_count);
+
+ LoadStoreLoop();
+ assertEquals(30, counters.get_count);
+ assertEquals(16, counters.set_count);
+
+ assertEquals(13, LoadVar());
+ assertEquals(31, counters.get_count);
+ assertEquals(16, counters.set_count);
+
+ // Now reconfigure to data property.
+ DefineRWDataProperty();
+ assertEquals(42, GetDescriptor().value);
+ assertEquals(42, LoadVar());
+ assertEquals(113, StoreVar(113));
+ assertEquals(31, counters.get_count);
+ assertEquals(16, counters.set_count);
+
+ TearDown();
+ })();
+
+
+ (function() {
+ SetUp();
+ cfg = {optimize: opt_cfg.optimize, strict: true};
+ ForceMutablePropertyCellType();
+ DefineLoadVar();
+ DefineStoreVar();
+ DefineROAccessorProperty();
+ assertEquals(0, counters.get_count);
+ assertEquals(0, counters.set_count);
+ assertEquals(true, GetDescriptor().configurable);
+
+ assertEquals(undefined, LoadVar());
+ assertEquals(1, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ SetX_(42);
+ assertEquals(42, LoadVar());
+ assertEquals(2, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ assertThrows('DefineVar(153)');
+ assertEquals(true, GetDescriptor().configurable);
+ assertEquals(2, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ assertEquals(42, LoadVar());
+ assertEquals(3, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ assertThrows('StoreVar(113)');
+ assertEquals(3, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ assertEquals(42, LoadVar());
+ assertEquals(4, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ assertThrows('LoadStoreLoop()');
+ assertEquals(4, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ assertEquals(42, LoadVar());
+ assertEquals(5, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ // Now reconfigure to data property.
+ DefineRWDataProperty();
+ assertEquals(42, GetDescriptor().value);
+ assertEquals(42, LoadVar());
+ assertEquals(113, StoreVar(113));
+ assertEquals(5, counters.get_count);
+ assertEquals(0, counters.set_count);
+
+ TearDown();
+ })();
+
+} // testSuite
+
+
+testSuite({optimize: false});
+testSuite({optimize: true});
diff --git a/deps/v8/test/mjsunit/regress/regress-119429.js b/deps/v8/test/mjsunit/regress/regress-119429.js
index a87648754a..859702ac7e 100644
--- a/deps/v8/test/mjsunit/regress/regress-119429.js
+++ b/deps/v8/test/mjsunit/regress/regress-119429.js
@@ -30,7 +30,7 @@
var d = 0;
function recurse() {
if (++d == 25135) { // A magic number just below stack overflow on ia32
- %DebugBreak();
+ %HandleDebuggerStatement();
}
recurse();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-3315.js b/deps/v8/test/mjsunit/regress/regress-3315.js
index a1105e2848..bfd7df29b8 100644
--- a/deps/v8/test/mjsunit/regress/regress-3315.js
+++ b/deps/v8/test/mjsunit/regress/regress-3315.js
@@ -1,6 +1,8 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Flags: --harmony-object-observe
var indexZeroCallCount = 0;
var indexOneCallCount = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-356589.js b/deps/v8/test/mjsunit/regress/regress-356589.js
index f93c545640..a47f51bac1 100644
--- a/deps/v8/test/mjsunit/regress/regress-356589.js
+++ b/deps/v8/test/mjsunit/regress/regress-356589.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --harmony-object-observe
+
// This test passes if it does not crash in debug mode
arr = ['a', 'b', 'c', 'd'];
diff --git a/deps/v8/test/mjsunit/regress/regress-417709a.js b/deps/v8/test/mjsunit/regress/regress-417709a.js
index d210c10429..5500be2cf0 100644
--- a/deps/v8/test/mjsunit/regress/regress-417709a.js
+++ b/deps/v8/test/mjsunit/regress/regress-417709a.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-object-observe
// Flags: --stack-size=100
var a = [];
diff --git a/deps/v8/test/mjsunit/regress/regress-4271.js b/deps/v8/test/mjsunit/regress/regress-4271.js
new file mode 100644
index 0000000000..bc18771e72
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4271.js
@@ -0,0 +1,24 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (this.Worker) {
+ // Throw rather than overflow internal field index
+ assertThrows(function() {
+ Worker.prototype.terminate();
+ });
+
+ assertThrows(function() {
+ Worker.prototype.getMessage();
+ });
+
+ assertThrows(function() {
+ Worker.prototype.postMessage({});
+ });
+
+ // Don't throw for real worker
+ var worker = new Worker('');
+ worker.getMessage();
+ worker.postMessage({});
+ worker.terminate();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-4279.js b/deps/v8/test/mjsunit/regress/regress-4279.js
new file mode 100644
index 0000000000..64ef967d89
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4279.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (this.Worker && this.quit) {
+ try {
+ new Function(new Worker("55"));
+ } catch(err) {}
+
+ quit();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-4296.js b/deps/v8/test/mjsunit/regress/regress-4296.js
new file mode 100644
index 0000000000..5774952a94
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4296.js
@@ -0,0 +1,40 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function () {
+ var o = new String("ab");
+ function store(o, i, v) { o[i] = v; }
+ function load(o, i) { return o[i]; }
+
+ // Initialize the IC.
+ store(o, 2, 10);
+ load(o, 2);
+
+ store(o, 0, 100);
+ assertEquals("a", load(o, 0));
+})();
+
+(function () {
+ var o = {__proto__: new String("ab")};
+ function store(o, i, v) { o[i] = v; }
+ function load(o, i) { return o[i]; }
+
+ // Initialize the IC.
+ store(o, 2, 10);
+ load(o, 2);
+
+ store(o, 0, 100);
+ assertEquals("a", load(o, 0));
+})();
+
+(function () {
+ "use strict";
+ var o = {__proto__: {}};
+ function store(o, i, v) { o[i] = v; }
+
+ // Initialize the IC.
+ store(o, 0, 100);
+ o.__proto__.__proto__ = new String("bla");
+ assertThrows(function () { store(o, 1, 100) });
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-4309-1.js b/deps/v8/test/mjsunit/regress/regress-4309-1.js
new file mode 100644
index 0000000000..a13fd43a4a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4309-1.js
@@ -0,0 +1,37 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+var Debug = debug.Debug;
+
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var scopes = exec_state.frame().allScopes();
+ assertEquals(3, scopes.length);
+ assertEquals(debug.ScopeType.Local, scopes[0].scopeType());
+ assertEquals(debug.ScopeType.Script, scopes[1].scopeType());
+ assertEquals(debug.ScopeType.Global, scopes[2].scopeType());
+ } catch (e) {
+ exception = e;
+ }
+}
+
+function f() {
+ eval('');
+ debugger;
+}
+
+f();
+f();
+
+%OptimizeFunctionOnNextCall(f);
+Debug.setListener(listener);
+
+f();
+
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-4309-2.js b/deps/v8/test/mjsunit/regress/regress-4309-2.js
new file mode 100644
index 0000000000..984b0071c1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4309-2.js
@@ -0,0 +1,34 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+var Debug = debug.Debug;
+
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var scope = exec_state.frame().scope(0);
+ assertEquals(5, scope.scopeObject().property("i").value().value());
+ } catch (e) {
+ exception = e;
+ }
+}
+
+function f() {
+ eval('var i = 5');
+ debugger;
+}
+
+f();
+f();
+
+%OptimizeFunctionOnNextCall(f);
+Debug.setListener(listener);
+
+f();
+
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-4309-3.js b/deps/v8/test/mjsunit/regress/regress-4309-3.js
new file mode 100644
index 0000000000..687dd4c44a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4309-3.js
@@ -0,0 +1,39 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+var Debug = debug.Debug;
+
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var scopes = exec_state.frame().allScopes();
+ assertEquals(4, scopes.length);
+ assertEquals(debug.ScopeType.With, scopes[0].scopeType());
+ assertEquals(debug.ScopeType.Local, scopes[1].scopeType());
+ assertEquals(debug.ScopeType.Script, scopes[2].scopeType());
+ assertEquals(debug.ScopeType.Global, scopes[3].scopeType());
+ } catch (e) {
+ exception = e;
+ }
+}
+
+function f() {
+ with({}) {
+ debugger;
+ }
+}
+
+f();
+f();
+
+%OptimizeFunctionOnNextCall(f);
+Debug.setListener(listener);
+
+f();
+
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-4320.js b/deps/v8/test/mjsunit/regress/regress-4320.js
new file mode 100644
index 0000000000..df6a99b28f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4320.js
@@ -0,0 +1,21 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+var Debug = debug.Debug;
+
+function f() { g(); }
+
+function g() { }
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
+Debug.setListener(function() {});
+Debug.setBreakPoint(g, 0);
+
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-4325.js b/deps/v8/test/mjsunit/regress/regress-4325.js
new file mode 100644
index 0000000000..e88bdd3b08
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4325.js
@@ -0,0 +1,48 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+function Inner() {
+ this.p1 = 0;
+ this.p2 = 3;
+}
+
+function Outer() {
+ this.p3 = 0;
+}
+
+var i1 = new Inner();
+var i2 = new Inner();
+var o1 = new Outer();
+o1.inner = i1;
+// o1.map now thinks "inner" has type Inner.map1.
+// Deprecate Inner.map1:
+i1.p1 = 0.5;
+// Let Inner.map1 die by migrating i2 to Inner.map2:
+print(i2.p1);
+gc();
+// o1.map's descriptor for "inner" is now a cleared WeakCell;
+// o1.inner's actual map is Inner.map2.
+// Prepare Inner.map3, deprecating Inner.map2.
+i2.p2 = 0.5;
+// Deprecate o1's map.
+var o2 = new Outer();
+o2.p3 = 0.5;
+o2.inner = i2;
+// o2.map (Outer.map2) now says that o2.inner's type is Inner.map3.
+// Migrate o1 to Outer.map2.
+print(o1.p3);
+// o1.map now thinks that o1.inner has map Inner.map3 just like o2.inner,
+// but in fact o1.inner.map is still Inner.map2!
+
+function loader(o) {
+ return o.inner.p2;
+}
+loader(o2);
+loader(o2);
+%OptimizeFunctionOnNextCall(loader);
+assertEquals(0.5, loader(o2));
+assertEquals(3, loader(o1));
+gc(); // Crashes with --verify-heap.
diff --git a/deps/v8/test/mjsunit/regress/regress-455207.js b/deps/v8/test/mjsunit/regress/regress-455207.js
new file mode 100644
index 0000000000..88fec4a3b9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-455207.js
@@ -0,0 +1,12 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+var s = "";
+for (var i = 16; i < 1085; i++) {
+ s += ("var a" + i + " = " + i + ";");
+}
+s += "const x = 10;" +
+ "assertEquals(10, x); x = 11; assertEquals(11, x)";
+assertThrows(function() { eval(s); });
diff --git a/deps/v8/test/mjsunit/regress/regress-509961.js b/deps/v8/test/mjsunit/regress/regress-509961.js
new file mode 100644
index 0000000000..d28bc8a268
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-509961.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = { x: 0 };
+delete o.x;
+function store(o, p, v) { o[p] = v; }
+store(o, "x", 1);
+store(o, "x", 1);
+store(o, "0", 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-514362.js b/deps/v8/test/mjsunit/regress/regress-514362.js
new file mode 100644
index 0000000000..f69cfecebe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-514362.js
@@ -0,0 +1,21 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+function bar(x) { debugger; }
+function foo() { bar(arguments[0]); }
+function wrap() { return foo(1); }
+
+wrap();
+wrap();
+%OptimizeFunctionOnNextCall(wrap);
+
+var Debug = debug.Debug;
+Debug.setListener(function(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ for (var i = 0; i < exec_state.frameCount(); i++) exec_state.frame(i);
+});
+
+wrap();
diff --git a/deps/v8/test/mjsunit/regress/regress-581.js b/deps/v8/test/mjsunit/regress/regress-581.js
index ab345a9b61..1b40f580e2 100644
--- a/deps/v8/test/mjsunit/regress/regress-581.js
+++ b/deps/v8/test/mjsunit/regress/regress-581.js
@@ -36,7 +36,6 @@ assertThrows(function() { a.concat(a); }, RangeError);
var b = [];
b[pow31 - 3] = 32;
-b[pow31 - 2] = "out_of_bounds";
var ab = a.concat(b);
assertEquals(2 * pow31 - 1, ab.length);
assertEquals(31, ab[pow31]);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-490021.js b/deps/v8/test/mjsunit/regress/regress-crbug-490021.js
new file mode 100644
index 0000000000..745c0a8010
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-490021.js
@@ -0,0 +1,15 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var global = new Object(3);
+function f() {
+ global[0] = global[0] >>> 15.5;
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-501711.js b/deps/v8/test/mjsunit/regress/regress-crbug-501711.js
index f8eda6e8d8..b253e9c912 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-501711.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-501711.js
@@ -8,7 +8,11 @@ function f() {
try {
f();
} catch(e) {
- Realm.create();
+ try {
+ Realm.create();
+ } catch (e) {
+ quit();
+ }
}
}
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-501809.js b/deps/v8/test/mjsunit/regress/regress-crbug-501809.js
index b348e5d5f6..c3abadfab5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-501809.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-501809.js
@@ -6,4 +6,4 @@
var sab = new SharedArrayBuffer(8);
var ta = new Int32Array(sab);
ta.__defineSetter__('length', function() {;});
-assertThrows(function() { Atomics.compareExchange(ta, 4294967295, 0, 0); });
+Atomics.compareExchange(ta, 4294967295, 0, 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-505007-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-505007-1.js
index 6012577aaa..910f4a6720 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-505007-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-505007-1.js
@@ -4,11 +4,15 @@
// Flags: --stack-size=100 --allow-natives-syntax
+var count = 0;
function f() {
try {
f();
} catch(e) {
- %GetDebugContext();
+ if (count < 100) {
+ count++;
+ %GetDebugContext();
+ }
}
}
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-505007-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-505007-2.js
index dfa34ae6ad..96014c848d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-505007-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-505007-2.js
@@ -5,11 +5,16 @@
// Flags: --stack-size=100 --allow-natives-syntax
function g() {}
+
+var count = 0;
function f() {
try {
f();
} catch(e) {
- %ExecuteInDebugContext(g);
+ if (count < 100) {
+ count++;
+ %ExecuteInDebugContext(g);
+ }
}
}
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-505907.js b/deps/v8/test/mjsunit/regress/regress-crbug-505907.js
new file mode 100644
index 0000000000..761261eca0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-505907.js
@@ -0,0 +1,12 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+try {
+ var p = Proxy.create({ getPropertyDescriptor: function() { return [] } });
+ var o = Object.create(p);
+ with (o) { unresolved_name() }
+} catch(e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-506549.js b/deps/v8/test/mjsunit/regress/regress-crbug-506549.js
new file mode 100644
index 0000000000..40e162caf5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-506549.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (this.Worker) {
+ var __v_5 = {};
+ __v_5.__defineGetter__('byteLength', function() {foo();});
+ var __v_8 = new Worker('onmessage = function() {};');
+ assertThrows(function() { __v_8.postMessage(__v_5); });
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-506956.js b/deps/v8/test/mjsunit/regress/regress-crbug-506956.js
new file mode 100644
index 0000000000..5862ddb296
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-506956.js
@@ -0,0 +1,12 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+try {
+ var p = Proxy.create({ getPropertyDescriptor: function() { throw "boom"; } });
+ var o = Object.create(p);
+ with (o) { delete unresolved_name; }
+} catch(e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-507070.js b/deps/v8/test/mjsunit/regress/regress-crbug-507070.js
new file mode 100644
index 0000000000..0cb14b27e7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-507070.js
@@ -0,0 +1,20 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --cache=code --no-debug-code
+
+try { } catch(e) { }
+try { try { } catch (e) { } } catch(e) { }
+try {
+ var Debug = %GetDebugContext().Debug;
+ Debug.setListener(function(){});
+} catch(e) { }
+(function() {
+ Debug.setBreakPoint(function(){}, 0, 0);
+})();
+
+var a = 1;
+a += a;
+Debug.setListener(null);
+assertEquals(2, a);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-510426.js b/deps/v8/test/mjsunit/regress/regress-crbug-510426.js
new file mode 100644
index 0000000000..c82dbacfa9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-510426.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var s = new String('a');
+s[10000000] = 'bente';
+assertEquals(['0', '10000000'], Object.keys(s));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-510738.js b/deps/v8/test/mjsunit/regress/regress-crbug-510738.js
new file mode 100644
index 0000000000..0e154a9a94
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-510738.js
@@ -0,0 +1,17 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function check(f, result) {
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(result, f());
+}
+
+var x = 17;
+function generic_load() { return x; }
+check(generic_load, 17);
+
+function generic_store() { x = 13; return x; }
+check(generic_store, 13);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-511880.js b/deps/v8/test/mjsunit/regress/regress-crbug-511880.js
new file mode 100644
index 0000000000..f9b05ff7bc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-511880.js
@@ -0,0 +1,13 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (this.Worker) {
+ var __v_8 =
+ `var __v_9 = new Worker('postMessage(42)');
+ onmessage = function(parentMsg) {
+ __v_9.postMessage(parentMsg);
+ };`;
+ var __v_9 = new Worker(__v_8);
+ __v_9.postMessage(9);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-513472.js b/deps/v8/test/mjsunit/regress/regress-crbug-513472.js
new file mode 100644
index 0000000000..456fe0a11d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-513472.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+this.__proto__ = Error();
+assertThrows(function() { NaN = 1; });
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-513507.js b/deps/v8/test/mjsunit/regress/regress-crbug-513507.js
new file mode 100644
index 0000000000..dbf35c91fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-513507.js
@@ -0,0 +1,24 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --noflush-optimized-code-cache --allow-natives-syntax
+
+// The following triggers a GC in SharedFunctionInfo::AddToOptimizedCodeMap.
+// Flags: --gc-interval=1234 --gc-global
+
+function makeFun() {
+ function fun(osr_fuse) {
+ for (var i = 0; i < 3; ++i) {
+ if (i == osr_fuse) %OptimizeOsr();
+ }
+ for (var i = 3; i < 6; ++i) {
+ if (i == osr_fuse) %OptimizeOsr();
+ }
+ }
+ return fun;
+}
+
+makeFun()(7); // Warm up.
+makeFun()(4); // Optimize once.
+makeFun()(1); // Optimize again.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-514081.js b/deps/v8/test/mjsunit/regress/regress-crbug-514081.js
new file mode 100644
index 0000000000..1acd8315cd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-514081.js
@@ -0,0 +1,15 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (this.Worker) {
+ var __v_7 = new Worker('onmessage = function() {};');
+ try {
+ var ab = new ArrayBuffer(2147483648);
+ // If creating the ArrayBuffer succeeded, then postMessage should fail.
+ assertThrows(function() { __v_7.postMessage(ab); });
+ } catch (e) {
+ // Creating the ArrayBuffer failed.
+ assertInstanceof(e, RangeError);
+ }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-516592.js b/deps/v8/test/mjsunit/regress/regress-crbug-516592.js
new file mode 100644
index 0000000000..1887824a6c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-516592.js
@@ -0,0 +1,18 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var i = Math.pow(2, 31);
+var a = [];
+a[i] = 31;
+var b = [];
+b[i - 2] = 33;
+try {
+ // This is supposed to throw a RangeError.
+ var c = a.concat(b);
+ // If it didn't, ObservableSetLength will detect the problem.
+ Object.observe(c, function() {});
+ c.length = 1;
+} catch(e) {
+ assertTrue(e instanceof RangeError);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-516775.js b/deps/v8/test/mjsunit/regress/regress-crbug-516775.js
new file mode 100644
index 0000000000..25d4d0103d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-516775.js
@@ -0,0 +1,53 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-concat-spreadable
+
+function arguments_with_length_getter(f) {
+ arguments.__defineGetter__('length', f);
+ return arguments;
+}
+
+var count = 0;
+function increment_count_return() { count++; return "boom"; }
+function increment_count_throw() { count++; throw "boom"; }
+
+// Do not read the length of an arguments object on the prototype chain of
+// an array.
+var a1 = [];
+%NormalizeElements(a1);
+a1.__proto__ = arguments_with_length_getter(increment_count_return);
+[].concat(a1);
+assertEquals(0, count);
+
+var a2 = [];
+%NormalizeElements(a2);
+a2.__proto__ = arguments_with_length_getter(increment_count_throw);
+[].concat(a2);
+assertEquals(0, count);
+
+// Do read the length of an arguments object if spreadable.
+var a3 = arguments_with_length_getter(increment_count_return);
+a3[Symbol.isConcatSpreadable] = true;
+[].concat(a3);
+assertEquals(1, count);
+
+var a4 = arguments_with_length_getter(increment_count_throw);
+a4[Symbol.isConcatSpreadable] = true;
+assertThrows(function() { [].concat(a4); });
+assertEquals(2, count);
+
+// Do read the length of an arguments object on the prototype chain of
+// an object.
+var a5 = {};
+a5.__proto__ = arguments_with_length_getter(increment_count_return);
+a5[Symbol.isConcatSpreadable] = true;
+[].concat(a5);
+assertEquals(3, count);
+
+var a6 = {};
+a6.__proto__ = arguments_with_length_getter(increment_count_throw);
+a6[Symbol.isConcatSpreadable] = true;
+assertThrows(function() { [].concat(a6); });
+assertEquals(4, count);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-517592.js b/deps/v8/test/mjsunit/regress/regress-crbug-517592.js
new file mode 100644
index 0000000000..760d892439
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-517592.js
@@ -0,0 +1,36 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --min-preparse-length=10
+
+var source =
+ "var foo = function foo() {\n" +
+ " return 1;\n" +
+ "}\n" +
+ "//@ sourceURL=test";
+
+Debug = debug.Debug;
+Debug.setListener(listener);
+var exception = null;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) break_count++;
+ if (event != Debug.DebugEvent.AfterCompile) return;
+ try {
+ var name = event_data.script().name();
+ var id = event_data.script().id();
+ assertEquals("test", name);
+ Debug.setScriptBreakPointById(id, 2);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+eval(source);
+
+assertEquals(0, break_count);
+foo();
+assertEquals(1, break_count);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-518747.js b/deps/v8/test/mjsunit/regress/regress-crbug-518747.js
new file mode 100644
index 0000000000..f1787c4c4b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-518747.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (this.Worker) {
+ Worker.prototype = 12;
+ var __v_6 = new Worker('');
+ __v_6.postMessage([]);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-518748.js b/deps/v8/test/mjsunit/regress/regress-crbug-518748.js
new file mode 100644
index 0000000000..cccbc26c24
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-518748.js
@@ -0,0 +1,14 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (this.Worker) {
+ var workersToCreate = 1000;
+ var workers = [];
+ assertThrows(function() {
+ for (var i = 0; i < workersToCreate; i++) {
+ workers.push(new Worker(''));
+ }
+ });
+ print('#workers: ', workers.length);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-522380.js b/deps/v8/test/mjsunit/regress/regress-crbug-522380.js
new file mode 100644
index 0000000000..eba07f783f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-522380.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var global = this;
+global.__defineSetter__('x', function(v) { x = v; });
+assertThrows("global.x = 0", RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-522496.js b/deps/v8/test/mjsunit/regress/regress-crbug-522496.js
new file mode 100644
index 0000000000..e47e0a0677
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-522496.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (this.Worker) {
+ var worker = new Worker("onmessage = function(){}");
+ var buf = new ArrayBuffer();
+ worker.postMessage(buf, [buf]);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-523308.js b/deps/v8/test/mjsunit/regress/regress-crbug-523308.js
new file mode 100644
index 0000000000..5715762ed6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-523308.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var error;
+try { reference_error(); } catch (e) { error = e; }
+toString = error.toString;
+error.__proto__ = [];
+assertEquals("ReferenceError: reference_error is not defined",
+ toString.call(error));
diff --git a/deps/v8/test/mjsunit/regress/regress-debugger-redirect.js b/deps/v8/test/mjsunit/regress/regress-debugger-redirect.js
new file mode 100644
index 0000000000..07c7fad7e6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-debugger-redirect.js
@@ -0,0 +1,37 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+function f(x) {
+ // This function compiles into code that only throws a redeclaration
+ // error. It contains no stack check and has no function body.
+ const x = 0;
+ return x;
+}
+
+function g() {
+ f(0);
+}
+
+var exception = null;
+var called = false;
+var Debug = debug.Debug;
+Debug.setBreakOnException();
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Exception) return;
+ try {
+ called = true;
+ Debug.setBreakPoint(f, 1);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+assertThrows(g);
+assertNull(exception);
+assertTrue(called);
diff --git a/deps/v8/test/mjsunit/regress/regress-observe-map-cache.js b/deps/v8/test/mjsunit/regress/regress-observe-map-cache.js
index 4c7a7e3e97..c71759c0cc 100644
--- a/deps/v8/test/mjsunit/regress/regress-observe-map-cache.js
+++ b/deps/v8/test/mjsunit/regress/regress-observe-map-cache.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-object-observe
// Flags: --allow-natives-syntax --enable-slow-asserts
function f() {
diff --git a/deps/v8/test/mjsunit/regress/regress-typedarray-length.js b/deps/v8/test/mjsunit/regress/regress-typedarray-length.js
index ee85364735..cae55731f9 100644
--- a/deps/v8/test/mjsunit/regress/regress-typedarray-length.js
+++ b/deps/v8/test/mjsunit/regress/regress-typedarray-length.js
@@ -71,43 +71,6 @@ assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
})();
-(function() {
- "use strict";
-
- class MyTypedArray extends Int32Array {
- constructor(length) {
- super(length);
- }
- }
-
- a = new MyTypedArray(1024);
-
- get = function(a) {
- return a.length;
- }
-
- assertEquals(1024, get(a));
- assertEquals(1024, get(a));
- assertEquals(1024, get(a));
- %OptimizeFunctionOnNextCall(get);
- assertEquals(1024, get(a));
-})();
-
-(function() {
- "use strict";
- var a = new Uint8Array(4);
- Object.defineProperty(a, "length", {get: function() { return "blah"; }});
- get = function(a) {
- return a.length;
- }
-
- assertEquals("blah", get(a));
- assertEquals("blah", get(a));
- assertEquals("blah", get(a));
- %OptimizeFunctionOnNextCall(get);
- assertEquals("blah", get(a));
-})();
-
// Ensure we cannot delete length, byteOffset, byteLength.
assertTrue(Int32Array.prototype.hasOwnProperty("length"));
assertTrue(Int32Array.prototype.hasOwnProperty("byteOffset"));
diff --git a/deps/v8/test/mjsunit/regress/string-compare-memcmp.js b/deps/v8/test/mjsunit/regress/string-compare-memcmp.js
index ae4b33ace9..45f47343ee 100644
--- a/deps/v8/test/mjsunit/regress/string-compare-memcmp.js
+++ b/deps/v8/test/mjsunit/regress/string-compare-memcmp.js
@@ -4,4 +4,4 @@
// Flags: --allow-natives-syntax
-assertEquals(-1, %StringCompareRT("abc\u0102", "abc\u0201"));
+assertEquals(-1, %StringCompare("abc\u0102", "abc\u0201"));
diff --git a/deps/v8/test/mjsunit/samevalue.js b/deps/v8/test/mjsunit/samevalue.js
index 229db0db4c..174afd2372 100644
--- a/deps/v8/test/mjsunit/samevalue.js
+++ b/deps/v8/test/mjsunit/samevalue.js
@@ -27,78 +27,94 @@
// Flags: --expose-natives-as natives
-// Test the SameValue internal method.
+// Test the SameValue and SameValueZero internal methods.
var obj1 = {x: 10, y: 11, z: "test"};
var obj2 = {x: 10, y: 11, z: "test"};
var sameValue = natives.$sameValue;
+var sameValueZero = natives.$sameValueZero;
-assertTrue(sameValue(0, 0));
-assertTrue(sameValue(+0, +0));
-assertTrue(sameValue(-0, -0));
-assertTrue(sameValue(1, 1));
-assertTrue(sameValue(2, 2));
-assertTrue(sameValue(-1, -1));
-assertTrue(sameValue(0.5, 0.5));
-assertTrue(sameValue(true, true));
-assertTrue(sameValue(false, false));
-assertTrue(sameValue(NaN, NaN));
-assertTrue(sameValue(null, null));
-assertTrue(sameValue("foo", "foo"));
-assertTrue(sameValue(obj1, obj1));
+// Calls SameValue and SameValueZero and checks that their results match.
+function sameValueBoth(a, b) {
+ var result = sameValue(a, b);
+ assertTrue(result === sameValueZero(a, b));
+ return result;
+}
+
+// Calls SameValue and SameValueZero and checks that their results don't match.
+function sameValueZeroOnly(a, b) {
+ var result = sameValueZero(a, b);
+ assertTrue(result && !sameValue(a, b));
+ return result;
+}
+
+assertTrue(sameValueBoth(0, 0));
+assertTrue(sameValueBoth(+0, +0));
+assertTrue(sameValueBoth(-0, -0));
+assertTrue(sameValueBoth(1, 1));
+assertTrue(sameValueBoth(2, 2));
+assertTrue(sameValueBoth(-1, -1));
+assertTrue(sameValueBoth(0.5, 0.5));
+assertTrue(sameValueBoth(true, true));
+assertTrue(sameValueBoth(false, false));
+assertTrue(sameValueBoth(NaN, NaN));
+assertTrue(sameValueBoth(null, null));
+assertTrue(sameValueBoth("foo", "foo"));
+assertTrue(sameValueBoth(obj1, obj1));
// Undefined values.
-assertTrue(sameValue());
-assertTrue(sameValue(undefined, undefined));
-
-assertFalse(sameValue(0,1));
-assertFalse(sameValue("foo", "bar"));
-assertFalse(sameValue(obj1, obj2));
-assertFalse(sameValue(true, false));
-
-assertFalse(sameValue(obj1, true));
-assertFalse(sameValue(obj1, "foo"));
-assertFalse(sameValue(obj1, 1));
-assertFalse(sameValue(obj1, undefined));
-assertFalse(sameValue(obj1, NaN));
-
-assertFalse(sameValue(undefined, true));
-assertFalse(sameValue(undefined, "foo"));
-assertFalse(sameValue(undefined, 1));
-assertFalse(sameValue(undefined, obj1));
-assertFalse(sameValue(undefined, NaN));
-
-assertFalse(sameValue(NaN, true));
-assertFalse(sameValue(NaN, "foo"));
-assertFalse(sameValue(NaN, 1));
-assertFalse(sameValue(NaN, obj1));
-assertFalse(sameValue(NaN, undefined));
-
-assertFalse(sameValue("foo", true));
-assertFalse(sameValue("foo", 1));
-assertFalse(sameValue("foo", obj1));
-assertFalse(sameValue("foo", undefined));
-assertFalse(sameValue("foo", NaN));
-
-assertFalse(sameValue(true, 1));
-assertFalse(sameValue(true, obj1));
-assertFalse(sameValue(true, undefined));
-assertFalse(sameValue(true, NaN));
-assertFalse(sameValue(true, "foo"));
-
-assertFalse(sameValue(1, true));
-assertFalse(sameValue(1, obj1));
-assertFalse(sameValue(1, undefined));
-assertFalse(sameValue(1, NaN));
-assertFalse(sameValue(1, "foo"));
+assertTrue(sameValueBoth());
+assertTrue(sameValueBoth(undefined, undefined));
+
+assertFalse(sameValueBoth(0,1));
+assertFalse(sameValueBoth("foo", "bar"));
+assertFalse(sameValueBoth(obj1, obj2));
+assertFalse(sameValueBoth(true, false));
+
+assertFalse(sameValueBoth(obj1, true));
+assertFalse(sameValueBoth(obj1, "foo"));
+assertFalse(sameValueBoth(obj1, 1));
+assertFalse(sameValueBoth(obj1, undefined));
+assertFalse(sameValueBoth(obj1, NaN));
+
+assertFalse(sameValueBoth(undefined, true));
+assertFalse(sameValueBoth(undefined, "foo"));
+assertFalse(sameValueBoth(undefined, 1));
+assertFalse(sameValueBoth(undefined, obj1));
+assertFalse(sameValueBoth(undefined, NaN));
+
+assertFalse(sameValueBoth(NaN, true));
+assertFalse(sameValueBoth(NaN, "foo"));
+assertFalse(sameValueBoth(NaN, 1));
+assertFalse(sameValueBoth(NaN, obj1));
+assertFalse(sameValueBoth(NaN, undefined));
+
+assertFalse(sameValueBoth("foo", true));
+assertFalse(sameValueBoth("foo", 1));
+assertFalse(sameValueBoth("foo", obj1));
+assertFalse(sameValueBoth("foo", undefined));
+assertFalse(sameValueBoth("foo", NaN));
+
+assertFalse(sameValueBoth(true, 1));
+assertFalse(sameValueBoth(true, obj1));
+assertFalse(sameValueBoth(true, undefined));
+assertFalse(sameValueBoth(true, NaN));
+assertFalse(sameValueBoth(true, "foo"));
+
+assertFalse(sameValueBoth(1, true));
+assertFalse(sameValueBoth(1, obj1));
+assertFalse(sameValueBoth(1, undefined));
+assertFalse(sameValueBoth(1, NaN));
+assertFalse(sameValueBoth(1, "foo"));
// Special string cases.
-assertFalse(sameValue("1", 1));
-assertFalse(sameValue("true", true));
-assertFalse(sameValue("false", false));
-assertFalse(sameValue("undefined", undefined));
-assertFalse(sameValue("NaN", NaN));
-
-// -0 and +0 are should be different
-assertFalse(sameValue(+0, -0));
-assertFalse(sameValue(-0, +0));
+assertFalse(sameValueBoth("1", 1));
+assertFalse(sameValueBoth("true", true));
+assertFalse(sameValueBoth("false", false));
+assertFalse(sameValueBoth("undefined", undefined));
+assertFalse(sameValueBoth("NaN", NaN));
+
+// SameValue considers -0 and +0 to be different; SameValueZero considers
+// -0 and +0 to be the same.
+assertTrue(sameValueZeroOnly(+0, -0));
+assertTrue(sameValueZeroOnly(-0, +0));
diff --git a/deps/v8/test/mjsunit/string-normalize.js b/deps/v8/test/mjsunit/string-normalize.js
index f88f193a09..d8ae74d4ea 100644
--- a/deps/v8/test/mjsunit/string-normalize.js
+++ b/deps/v8/test/mjsunit/string-normalize.js
@@ -9,3 +9,11 @@ assertEquals('', ''.normalize());
assertThrows(function() { ''.normalize('invalid'); }, RangeError);
assertTrue(delete Array.prototype.join);
assertThrows(function() { ''.normalize('invalid'); }, RangeError);
+
+// All of these toString to an invalid form argument.
+assertThrows(function() { ''.normalize(null) }, RangeError);
+assertThrows(function() { ''.normalize(true) }, RangeError);
+assertThrows(function() { ''.normalize(false) }, RangeError);
+assertThrows(function() { ''.normalize(42) }, RangeError);
+assertThrows(function() { ''.normalize({}) }, RangeError);
+assertThrows(function() { ''.normalize([]) }, RangeError);
diff --git a/deps/v8/test/mjsunit/strong/class-extend-null.js b/deps/v8/test/mjsunit/strong/class-extend-null.js
new file mode 100644
index 0000000000..3ed7b36dbb
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/class-extend-null.js
@@ -0,0 +1,97 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode --allow-natives-syntax
+
+(function() {
+"use strict";
+
+let foo = null;
+
+function nullLiteral() {
+ class Class1 extends null {
+ constructor() {
+ super();
+ }
+ }
+}
+
+function nullVariable() {
+ class Class2 extends foo {
+ constructor() {
+ super();
+ }
+ }
+}
+
+function nullLiteralClassExpr() {
+ (class extends null {});
+}
+
+function nullVariableClassExpr() {
+ (class extends foo {});
+}
+
+assertDoesNotThrow(nullLiteral);
+%OptimizeFunctionOnNextCall(nullLiteral);
+assertDoesNotThrow(nullLiteral);
+
+assertDoesNotThrow(nullVariable);
+%OptimizeFunctionOnNextCall(nullVariable);
+assertDoesNotThrow(nullVariable);
+
+assertDoesNotThrow(nullLiteralClassExpr);
+%OptimizeFunctionOnNextCall(nullLiteralClassExpr);
+assertDoesNotThrow(nullLiteralClassExpr);
+
+assertDoesNotThrow(nullVariableClassExpr);
+%OptimizeFunctionOnNextCall(nullVariableClassExpr);
+assertDoesNotThrow(nullVariableClassExpr);
+})();
+
+(function() {
+"use strong";
+
+let foo = null;
+
+function nullLiteral() {
+ class Class1 extends null {
+ constructor() {
+ super();
+ }
+ }
+}
+
+function nullVariable() {
+ class Class2 extends foo {
+ constructor() {
+ super();
+ }
+ }
+}
+
+function nullLiteralClassExpr() {
+ (class extends null {});
+}
+
+function nullVariableClassExpr() {
+ (class extends foo {});
+}
+
+assertThrows(nullLiteral, TypeError);
+%OptimizeFunctionOnNextCall(nullLiteral);
+assertThrows(nullLiteral, TypeError);
+
+assertThrows(nullVariable, TypeError);
+%OptimizeFunctionOnNextCall(nullVariable);
+assertThrows(nullVariable, TypeError);
+
+assertThrows(nullLiteralClassExpr, TypeError);
+%OptimizeFunctionOnNextCall(nullLiteralClassExpr);
+assertThrows(nullLiteralClassExpr, TypeError);
+
+assertThrows(nullVariableClassExpr, TypeError);
+%OptimizeFunctionOnNextCall(nullVariableClassExpr);
+assertThrows(nullVariableClassExpr, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/strong/class-object-frozen.js b/deps/v8/test/mjsunit/strong/class-object-frozen.js
new file mode 100644
index 0000000000..2c442c0d51
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/class-object-frozen.js
@@ -0,0 +1,98 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode
+
+"use strict";
+
+function getClass() {
+ class Foo {
+ static get bar() { return 0 }
+ get bar() { return 0 }
+ }
+ return Foo;
+}
+
+function getClassExpr() {
+ return (class { static get bar() { return 0 } get bar() { return 0 } });
+}
+
+function getClassStrong() {
+ "use strong";
+ class Foo {
+ static get bar() { return 0 }
+ get bar() { return 0 }
+ }
+ return Foo;
+}
+
+function getClassExprStrong() {
+ "use strong";
+ return (class { static get bar() { return 0 } get bar() { return 0 } });
+}
+
+function addProperty(o) {
+ o.baz = 1;
+}
+
+function convertPropertyToData(o) {
+ assertTrue(o.hasOwnProperty("bar"));
+ Object.defineProperty(o, "bar", { value: 1 });
+}
+
+function testWeakClass(classFunc) {
+ assertDoesNotThrow(function(){addProperty(classFunc())});
+ assertDoesNotThrow(function(){addProperty(classFunc().prototype)});
+ assertDoesNotThrow(function(){convertPropertyToData(classFunc())});
+ assertDoesNotThrow(function(){convertPropertyToData(classFunc().prototype)});
+}
+
+function testStrongClass(classFunc) {
+ assertThrows(function(){addProperty(classFunc())}, TypeError);
+ assertThrows(function(){addProperty(classFunc().prototype)}, TypeError);
+ assertThrows(function(){convertPropertyToData(classFunc())}, TypeError);
+ assertThrows(function(){convertPropertyToData(classFunc().prototype)},
+ TypeError);
+}
+
+testWeakClass(getClass);
+testWeakClass(getClassExpr);
+
+testStrongClass(getClassStrong);
+testStrongClass(getClassExprStrong);
+
+// Check strong classes don't freeze their parents.
+(function() {
+ let parent = getClass();
+
+ let classFunc = function() {
+ "use strong";
+ class Foo extends parent {
+ static get bar() { return 0 }
+ get bar() { return 0 }
+ }
+ return Foo;
+ }
+
+ testStrongClass(classFunc);
+ assertDoesNotThrow(function(){addProperty(parent)});
+ assertDoesNotThrow(function(){convertPropertyToData(parent)});
+})();
+
+// Check strong classes don't freeze their children.
+(function() {
+ let parent = getClassStrong();
+
+ let classFunc = function() {
+ class Foo extends parent {
+ static get bar() { return 0 }
+ get bar() { return 0 }
+ }
+ return Foo;
+ }
+
+ assertThrows(function(){addProperty(parent)}, TypeError);
+ assertThrows(function(){convertPropertyToData(parent)}, TypeError);
+ testWeakClass(classFunc);
+})();
diff --git a/deps/v8/test/mjsunit/strong/declaration-after-use.js b/deps/v8/test/mjsunit/strong/declaration-after-use.js
index 5f3ef2a79c..e6caccfcca 100644
--- a/deps/v8/test/mjsunit/strong/declaration-after-use.js
+++ b/deps/v8/test/mjsunit/strong/declaration-after-use.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
// Flags: --strong-mode --harmony-rest-parameters --harmony-arrow-functions
-// Flags: --harmony-computed-property-names
// Note that it's essential for these tests that the reference is inside dead
// code (because we already produce ReferenceErrors for run-time unresolved
diff --git a/deps/v8/test/mjsunit/strong/destructuring.js b/deps/v8/test/mjsunit/strong/destructuring.js
new file mode 100644
index 0000000000..67fe2ef4f1
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/destructuring.js
@@ -0,0 +1,25 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-destructuring
+// Flags: --harmony-arrow-functions --strong-mode --allow-natives-syntax
+
+(function() {
+ function f({ x = function() { return []; } }) { "use strong"; return x(); }
+ var a = f({ x: undefined });
+ assertTrue(%IsStrong(a));
+
+ // TODO(rossberg): Loading non-existent properties during destructuring should
+ // not throw in strong mode.
+ assertThrows(function() { f({}); }, TypeError);
+
+ function weakf({ x = function() { return []; } }) { return x(); }
+ a = weakf({});
+ assertFalse(%IsStrong(a));
+
+ function outerf() { return []; }
+ function f2({ x = outerf }) { "use strong"; return x(); }
+ a = f2({ x: undefined });
+ assertFalse(%IsStrong(a));
+})();
diff --git a/deps/v8/test/mjsunit/strong/literals.js b/deps/v8/test/mjsunit/strong/literals.js
index 73129e7a09..8c04d6e35a 100644
--- a/deps/v8/test/mjsunit/strong/literals.js
+++ b/deps/v8/test/mjsunit/strong/literals.js
@@ -310,8 +310,8 @@ let GeneratorPrototype = (function*(){}).__proto__;
'use strong';
function assertStrongClass(x) {
assertTrue(%IsStrong(x));
- // TODO(rossberg): strongify class prototype and instance
- // assertTrue(%IsStrong(x.prototype));
+ assertTrue(%IsStrong(x.prototype));
+ // TODO(rossberg): strongify class instance
// assertTrue(%IsStrong(new x));
}
class C {};
diff --git a/deps/v8/test/mjsunit/unbox-double-arrays.js b/deps/v8/test/mjsunit/unbox-double-arrays.js
index 5ed404025f..8711ffdf3d 100644
--- a/deps/v8/test/mjsunit/unbox-double-arrays.js
+++ b/deps/v8/test/mjsunit/unbox-double-arrays.js
@@ -489,17 +489,6 @@ test_for_in();
test_for_in();
test_for_in();
-function test_get_property_names() {
- names = %GetPropertyNames(large_array3);
- property_name_count = 0;
- for (x in names) { property_name_count++; };
- assertEquals(26, property_name_count);
-}
-
-test_get_property_names();
-test_get_property_names();
-test_get_property_names();
-
// Test elements getters.
assertEquals(expected_array_value(10), large_array3[10]);
assertEquals(expected_array_value(-NaN), large_array3[2]);
diff --git a/deps/v8/test/preparser/testcfg.py b/deps/v8/test/preparser/testcfg.py
index ddd311c201..7e51b8ef58 100644
--- a/deps/v8/test/preparser/testcfg.py
+++ b/deps/v8/test/preparser/testcfg.py
@@ -126,8 +126,8 @@ class PreparserTestSuite(testsuite.TestSuite):
with open(testcase.flags[0]) as f:
return f.read()
- def VariantFlags(self, testcase, default_flags):
- return [[]];
+ def _VariantGeneratorFactory(self):
+ return testsuite.StandardVariantGenerator
def GetSuite(name, root):
diff --git a/deps/v8/test/promises-aplus/lib/global.js b/deps/v8/test/promises-aplus/lib/global.js
index 1466d2063b..ece338ed3e 100644
--- a/deps/v8/test/promises-aplus/lib/global.js
+++ b/deps/v8/test/promises-aplus/lib/global.js
@@ -33,15 +33,6 @@ var clearTimeout;
var timers = {};
var currentId = 0;
-function PostMicrotask(fn) {
- var o = {};
- Object.observe(o, function() {
- fn();
- });
- // Change something to enqueue a microtask.
- o.x = 'hello';
-}
-
setInterval = function(fn, delay) {
var i = 0;
var id = currentId++;
@@ -52,9 +43,9 @@ setInterval = function(fn, delay) {
if (i++ >= delay) {
fn();
}
- PostMicrotask(loop);
+ %EnqueueMicrotask(loop);
}
- PostMicrotask(loop);
+ %EnqueueMicrotask(loop);
timers[id] = true;
return id;
}
diff --git a/deps/v8/test/promises-aplus/lib/mocha.js b/deps/v8/test/promises-aplus/lib/mocha.js
index 24d294ef8f..0a172b9d2f 100644
--- a/deps/v8/test/promises-aplus/lib/mocha.js
+++ b/deps/v8/test/promises-aplus/lib/mocha.js
@@ -41,15 +41,6 @@ var assert = require('assert');
(function() {
var TIMEOUT = 1000;
-function PostMicrotask(fn) {
- var o = {};
- Object.observe(o, function() {
- fn();
- });
- // Change something to enqueue a microtask.
- o.x = 'hello';
-}
-
var context = {
beingDescribed: undefined,
currentSuiteIndex: 0,
@@ -162,7 +153,7 @@ TestCase.prototype.Run = function(suite, postAction) {
if (this.isRegular) {
print('PASS: ' + suite.description + '#' + this.name);
}
- PostMicrotask(postAction);
+ %EnqueueMicrotask(postAction);
}.bind(this));
}.bind(this));
}.bind(this));
@@ -194,14 +185,14 @@ function TestSuite(described) {
TestSuite.prototype.Run = function() {
this.hasRun = this.currentIndex === this.cases.length;
if (this.hasRun) {
- PostMicrotask(Run);
+ %EnqueueMicrotask(Run);
return;
}
// TestCase.prototype.Run cannot throw an exception.
this.cases[this.currentIndex].Run(this, function() {
++this.currentIndex;
- PostMicrotask(Run);
+ %EnqueueMicrotask(Run);
}.bind(this));
};
@@ -224,7 +215,7 @@ TestSuite.prototype.ReportError = function(testCase, e) {
print('FAIL: ' + this.description + '#' + testCase.name + ': ' +
e.name + ' (' + e.message + ')');
++this.currentIndex;
- PostMicrotask(Run);
+ %EnqueueMicrotask(Run);
};
describe = function(description, fn) {
diff --git a/deps/v8/test/promises-aplus/testcfg.py b/deps/v8/test/promises-aplus/testcfg.py
index bd03379187..5f447c3f90 100644
--- a/deps/v8/test/promises-aplus/testcfg.py
+++ b/deps/v8/test/promises-aplus/testcfg.py
@@ -77,7 +77,7 @@ class PromiseAplusTestSuite(testsuite.TestSuite):
if fname.endswith('.js')]
def GetFlagsForTestCase(self, testcase, context):
- return (testcase.flags + context.mode_flags + ['--harmony'] +
+ return (testcase.flags + context.mode_flags + ['--allow-natives-syntax'] +
self.helper_files_pre +
[os.path.join(self.test_files_root, testcase.path + '.js')] +
self.helper_files_post)
diff --git a/deps/v8/test/simdjs/SimdJs.json b/deps/v8/test/simdjs/SimdJs.json
index b671ac4cb8..ae2a32e308 100644
--- a/deps/v8/test/simdjs/SimdJs.json
+++ b/deps/v8/test/simdjs/SimdJs.json
@@ -18,22 +18,17 @@
"test/simdjs/data/src/benchmarks/averageFloat32x4LoadX.js",
"test/simdjs/data/src/benchmarks/averageFloat32x4LoadXY.js",
"test/simdjs/data/src/benchmarks/averageFloat32x4LoadXYZ.js",
- "test/simdjs/data/src/benchmarks/averageFloat64x2.js",
- "test/simdjs/data/src/benchmarks/averageFloat64x2Load.js",
- "test/simdjs/data/src/benchmarks/mandelbrot.js",
"test/simdjs/data/src/benchmarks/matrix-multiplication.js",
"test/simdjs/data/src/benchmarks/transform.js",
"test/simdjs/data/src/benchmarks/shiftrows.js",
"test/simdjs/data/src/benchmarks/transpose4x4.js",
"test/simdjs/data/src/benchmarks/inverse4x4.js",
- "test/simdjs/data/src/benchmarks/sinx4.js",
"test/simdjs/data/src/benchmarks/memset.js",
"test/simdjs/data/src/benchmarks/memcpy.js"
],
"run_count": 5,
- "run_count_android_arm": 1,
- "run_count_android_arm64": 3,
"run_count_arm": 3,
+ "run_count_arm64": 3,
"tests": [
{
"flags": [
@@ -133,54 +128,6 @@
},
{
"flags": [
- "test/simdjs/data/src/benchmarks/averageFloat64x2.js"
- ],
- "main": "test/simdjs/harness-finish.js",
- "name": "averageFloat64x2",
- "results_regexp": "%s\\([ ]*([0-9.]+)(ms)?\\)",
- "tests": [
- {
- "name": "SIMD"
- },
- {
- "name": "Non-SIMD"
- }
- ]
- },
- {
- "flags": [
- "test/simdjs/data/src/benchmarks/averageFloat64x2Load.js"
- ],
- "main": "test/simdjs/harness-finish.js",
- "name": "averageFloat64x2Load",
- "results_regexp": "%s\\([ ]*([0-9.]+)(ms)?\\)",
- "tests": [
- {
- "name": "SIMD"
- },
- {
- "name": "Non-SIMD"
- }
- ]
- },
- {
- "flags": [
- "test/simdjs/data/src/benchmarks/mandelbrot.js"
- ],
- "main": "test/simdjs/harness-finish.js",
- "name": "mandelbrot",
- "results_regexp": "%s\\([ ]*([0-9.]+)(ms)?\\)",
- "tests": [
- {
- "name": "SIMD"
- },
- {
- "name": "Non-SIMD"
- }
- ]
- },
- {
- "flags": [
"test/simdjs/data/src/benchmarks/matrix-multiplication.js"
],
"main": "test/simdjs/harness-finish.js",
@@ -261,22 +208,6 @@
},
{
"flags": [
- "test/simdjs/data/src/benchmarks/sinx4.js"
- ],
- "main": "test/simdjs/harness-finish.js",
- "name": "sinx4",
- "results_regexp": "%s\\([ ]*([0-9.]+)(ms)?\\)",
- "tests": [
- {
- "name": "SIMD"
- },
- {
- "name": "Non-SIMD"
- }
- ]
- },
- {
- "flags": [
"test/simdjs/data/src/benchmarks/memset.js"
],
"main": "test/simdjs/harness-finish.js",
@@ -308,8 +239,7 @@
]
}
],
- "timeout_android_arm": 180,
- "timeout_android_arm64": 120,
- "timeout_arm": 120,
+ "timeout_arm": 240,
+ "timeout_arm64": 120,
"units": "ms"
} \ No newline at end of file
diff --git a/deps/v8/test/simdjs/harness-adapt.js b/deps/v8/test/simdjs/harness-adapt.js
index a2ca2372c4..c90d6cc9d1 100644
--- a/deps/v8/test/simdjs/harness-adapt.js
+++ b/deps/v8/test/simdjs/harness-adapt.js
@@ -27,3 +27,13 @@ load('ecmascript_simd.js');
load('base.js');
})();
+
+// ecmascript_simd_tests logs errors to the console.
+var console = {
+ log: function(x) { print(x); },
+};
+
+
+// Disable value type tests for now. The value semantics tests are incorrect.
+// TODO(bbudge): Drop when tests are fixed.
+var skipValueTests = true;
diff --git a/deps/v8/test/simdjs/simdjs.status b/deps/v8/test/simdjs/simdjs.status
index 99ce8865e5..4ef3c9841b 100644
--- a/deps/v8/test/simdjs/simdjs.status
+++ b/deps/v8/test/simdjs/simdjs.status
@@ -10,8 +10,12 @@
[
[ALWAYS, {
- # TODO(bradnelson): Drop when test is fixed upstream.
+ # TODO(bbudge): Drop when test is fixed upstream.
'benchmarks/aobench': SKIP,
+ 'benchmarks/averageFloat64x2': SKIP,
+ 'benchmarks/averageFloat64x2Load': SKIP,
+ 'benchmarks/mandelbrot': SKIP,
+ 'benchmarks/sinx4': SKIP,
# TODO(bbudge): Drop this when simd implementation is faster.
'benchmarks/memcpy': SKIP,
diff --git a/deps/v8/test/simdjs/testcfg.py b/deps/v8/test/simdjs/testcfg.py
index c0390afd65..cbe880d149 100644
--- a/deps/v8/test/simdjs/testcfg.py
+++ b/deps/v8/test/simdjs/testcfg.py
@@ -14,10 +14,9 @@ from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
-SIMDJS_ARCHIVE_REVISION = "07e2713e0c9ea19feb0732d5bd84770c87310d79"
-SIMDJS_ARCHIVE_MD5 = "cf6bddf99f18800b68e782054268ee3c"
-SIMDJS_URL = (
- "https://github.com/johnmccutchan/ecmascript_simd/archive/%s.tar.gz")
+SIMDJS_ARCHIVE_REVISION = "99ef44bd4f22acd203c01e524131bc7f2a7eab68"
+SIMDJS_ARCHIVE_MD5 = "1428773887924fa5a784bf0843615740"
+SIMDJS_URL = ("https://github.com/tc39/ecmascript_simd/archive/%s.tar.gz")
SIMDJS_SUITE_PATH = ["data", "src"]
@@ -65,10 +64,44 @@ class SimdJsTestSuite(testsuite.TestSuite):
def DownloadData(self):
revision = SIMDJS_ARCHIVE_REVISION
archive_url = SIMDJS_URL % revision
+
+ archive_prefix = "ecmascript_simd-"
archive_name = os.path.join(
- self.root, "ecmascript_simd-%s.tar.gz" % revision)
+ self.root, "%s%s.tar.gz" % (archive_prefix, revision))
directory_name = os.path.join(self.root, "data")
directory_old_name = os.path.join(self.root, "data.old")
+ versionfile = os.path.join(self.root, "CHECKED_OUT_VERSION")
+
+ checked_out_version = None
+ checked_out_url = None
+ checked_out_revision = None
+ if os.path.exists(versionfile):
+ with open(versionfile) as f:
+ try:
+ (checked_out_version,
+ checked_out_url,
+ checked_out_revision) = f.read().splitlines()
+ except ValueError:
+ pass
+ if (checked_out_version != SIMDJS_ARCHIVE_MD5 or
+ checked_out_url != archive_url or
+ checked_out_revision != revision):
+ if os.path.exists(archive_name):
+ print "Clobbering %s because CHECK_OUT_VERSION is out of date" % (
+ archive_name)
+ os.remove(archive_name)
+
+ # Clobber if the test is in an outdated state, i.e. if there are any other
+ # archive files present.
+ archive_files = [f for f in os.listdir(self.root)
+ if f.startswith(archive_prefix)]
+ if (len(archive_files) > 1 or
+ os.path.basename(archive_name) not in archive_files):
+ print "Clobber outdated test archives ..."
+ for f in archive_files:
+ print "Removing %s" % f
+ os.remove(os.path.join(self.root, f))
+
if not os.path.exists(archive_name):
print "Downloading test data from %s ..." % archive_url
utils.URLRetrieve(archive_url, archive_name)
@@ -96,6 +129,11 @@ class SimdJsTestSuite(testsuite.TestSuite):
os.rename(os.path.join(self.root, "ecmascript_simd-%s" % revision),
directory_name)
+ with open(versionfile, "w") as f:
+ f.write(SIMDJS_ARCHIVE_MD5 + '\n')
+ f.write(archive_url + '\n')
+ f.write(revision + '\n')
+
def GetSuite(name, root):
return SimdJsTestSuite(name, root)
diff --git a/deps/v8/test/test262-es6/test262-es6.status b/deps/v8/test/test262-es6/test262-es6.status
index 65ded6d3f5..16068bf0ca 100644
--- a/deps/v8/test/test262-es6/test262-es6.status
+++ b/deps/v8/test/test262-es6/test262-es6.status
@@ -34,16 +34,6 @@
'intl402/12.2.3_b': [FAIL],
# BUG(v8:4267)
- 'built-ins/Object/defineProperty/15.2.3.6-4-116': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-117': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-168': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-169': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-170': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-172': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-173': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-174': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-176': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-177': [FAIL],
'built-ins/Object/defineProperties/15.2.3.7-6-a-112': [FAIL],
'built-ins/Object/defineProperties/15.2.3.7-6-a-113': [FAIL],
'built-ins/Object/defineProperties/15.2.3.7-6-a-164': [FAIL],
@@ -56,6 +46,16 @@
'built-ins/Object/defineProperties/15.2.3.7-6-a-173': [FAIL],
'built-ins/Object/defineProperties/15.2.3.7-6-a-175': [FAIL],
'built-ins/Object/defineProperties/15.2.3.7-6-a-176': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-116': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-117': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-168': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-169': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-170': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-172': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-173': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-174': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-176': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-177': [FAIL],
# Unicode canonicalization is not available with i18n turned off.
'built-ins/String/prototype/localeCompare/15.5.4.9_CE': [['no_i18n', SKIP]],
@@ -63,9 +63,9 @@
###################### NEEDS INVESTIGATION #######################
# Possibly same cause as S8.5_A2.1, below: floating-point tests.
+ 'built-ins/Math/cos/S15.8.2.7_A7': [PASS, FAIL_OK],
'built-ins/Math/sin/S15.8.2.16_A7': [PASS, FAIL_OK],
'built-ins/Math/tan/S15.8.2.18_A7': [PASS, FAIL_OK],
- 'built-ins/Math/cos/S15.8.2.7_A7': [PASS, FAIL_OK],
# This is an incompatibility between ES5 and V8 on enumerating
# shadowed elements in a for..in loop.
@@ -76,7 +76,6 @@
# Class, let, const in sloppy mode.
# https://code.google.com/p/v8/issues/detail?id=3305
- 'built-ins/Array/prototype/concat/Array.prototype.concat_non-array': [PASS, FAIL_SLOPPY],
'language/block-scope/leave/finally-block-let-declaration-only-shadows-outer-parameter-value-1': [PASS, FAIL_SLOPPY],
'language/block-scope/leave/finally-block-let-declaration-only-shadows-outer-parameter-value-2': [PASS, FAIL_SLOPPY],
'language/block-scope/leave/for-loop-block-let-declaration-only-shadows-outer-parameter-value-1': [PASS, FAIL_SLOPPY],
@@ -109,107 +108,6 @@
'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-function-declaration-with-function-declaration': [PASS, FAIL_SLOPPY],
'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-function-declaration-with-var': [PASS, FAIL_SLOPPY],
'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-var-with-function-declaration': [PASS, FAIL_SLOPPY],
- 'language/class/definition/ClassDeclaration_restricted-properties': [PASS, FAIL_SLOPPY],
- 'language/class/definition/ClassExpression_restricted-properties': [PASS, FAIL_SLOPPY],
- 'language/class/definition/ClassMethod_restricted-properties': [PASS, FAIL_SLOPPY],
- 'language/class/method-definition/generator-no-yield': [PASS, FAIL_SLOPPY],
- 'language/class/method-definition/generator-return': [PASS, FAIL_SLOPPY],
- 'language/class/method-definition/yield-as-expression-with-rhs': [PASS, FAIL_SLOPPY],
- 'language/class/method-definition/yield-as-expression-without-rhs': [PASS, FAIL_SLOPPY],
- 'language/class/method-definition/yield-as-generator-method-binding-identifier': [PASS, FAIL_SLOPPY],
- 'language/class/method-definition/yield-as-literal-property-name': [PASS, FAIL_SLOPPY],
- 'language/class/method-definition/yield-as-property-name': [PASS, FAIL_SLOPPY],
- 'language/class/method-definition/yield-as-statement': [PASS, FAIL_SLOPPY],
- 'language/class/method-definition/yield-as-yield-operand': [PASS, FAIL_SLOPPY],
- 'language/class/method-definition/yield-newline': [PASS, FAIL_SLOPPY],
- 'language/class/method-definition/yield-star-before-newline': [PASS, FAIL_SLOPPY],
- 'language/computed-property-names/class/accessor/*': [PASS, FAIL_SLOPPY],
- 'language/computed-property-names/class/method/*': [PASS, FAIL_SLOPPY],
- 'language/computed-property-names/class/static/generator-constructor': [PASS, FAIL_SLOPPY],
- 'language/computed-property-names/class/static/generator-prototype': [PASS, FAIL_SLOPPY],
- 'language/computed-property-names/class/static/getter-constructor': [PASS, FAIL_SLOPPY],
- 'language/computed-property-names/class/static/getter-prototype': [PASS, FAIL_SLOPPY],
- 'language/computed-property-names/class/static/method-constructor': [PASS, FAIL_SLOPPY],
- 'language/computed-property-names/class/static/method-prototype': [PASS, FAIL_SLOPPY],
- 'language/computed-property-names/class/static/setter-constructor': [PASS, FAIL_SLOPPY],
- 'language/computed-property-names/class/static/setter-prototype': [PASS, FAIL_SLOPPY],
- 'language/computed-property-names/to-name-side-effects/class': [PASS, FAIL_SLOPPY],
- 'language/computed-property-names/to-name-side-effects/numbers-class': [PASS, FAIL_SLOPPY],
- 'language/expressions/arrow-function/lexical-super-call-from-within-constructor':[PASS, FAIL_SLOPPY],
- 'language/expressions/arrow-function/lexical-super-property-from-within-constructor': [PASS, FAIL_SLOPPY],
- 'language/expressions/arrow-function/lexical-super-property': [PASS, FAIL_SLOPPY],
- 'language/expressions/arrow-function/lexical-supercall-from-immediately-invoked-arrow': [PASS, FAIL_SLOPPY],
- 'language/expressions/object/method-definition/generator-param-redecl-const': [PASS, FAIL_SLOPPY],
- 'language/expressions/object/method-definition/generator-shadow-parameter-const': [PASS, FAIL_SLOPPY],
- 'language/rest-parameters/with-new-target': [PASS, FAIL_SLOPPY],
- 'language/statements/class/arguments/access': [PASS, FAIL_SLOPPY],
- 'language/statements/class/arguments/default-constructor': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/accessors': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/constructable-but-no-prototype': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/constructor-property': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/constructor-strict-by-default': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/constructor': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/getters-2': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/getters': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/implicit-constructor': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/invalid-extends': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/methods-named-eval-arguments': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/methods': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/numeric-property-names': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/prototype-getter': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/prototype-property': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/prototype-setter': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/prototype-wiring': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/setters-2': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/setters': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/side-effects-in-extends': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/side-effects-in-property-define': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/this-access-restriction-2': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/this-access-restriction': [PASS, FAIL_SLOPPY],
- 'language/statements/class/definition/this-check-ordering': [PASS, FAIL_SLOPPY],
- 'language/statements/class/name-binding/basic': [PASS, FAIL_SLOPPY],
- 'language/statements/class/name-binding/const': [PASS, FAIL_SLOPPY],
- 'language/statements/class/name-binding/expression': [PASS, FAIL_SLOPPY],
- 'language/statements/class/strict-mode/arguments-caller': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/binding': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/builtins': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/class-definition-evaluation-empty-constructor-heritage-present': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/class-definition-null-proto-contains-return-override': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/class-definition-null-proto-missing-return-override': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/class-definition-null-proto': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/class-definition-superclass-generator': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/default-constructor-2': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/default-constructor': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/derived-class-return-override-with-boolean': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/derived-class-return-override-with-empty': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/derived-class-return-override-with-null': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/derived-class-return-override-with-number': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/derived-class-return-override-with-object': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/derived-class-return-override-with-string': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/derived-class-return-override-with-symbol': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/derived-class-return-override-with-this': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/derived-class-return-override-with-undefined': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/superclass-prototype-setter-constructor': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/superclass-prototype-setter-method-override': [PASS, FAIL_SLOPPY],
- 'language/statements/class/subclass/superclass-static-method-override': [PASS, FAIL_SLOPPY],
- 'language/statements/class/super/in-constructor': [PASS, FAIL_SLOPPY],
- 'language/statements/class/super/in-getter': [PASS, FAIL_SLOPPY],
- 'language/statements/class/super/in-methods': [PASS, FAIL_SLOPPY],
- 'language/statements/class/super/in-setter': [PASS, FAIL_SLOPPY],
- 'language/statements/class/super/in-static-getter': [PASS, FAIL_SLOPPY],
- 'language/statements/class/super/in-static-methods': [PASS, FAIL_SLOPPY],
- 'language/statements/class/super/in-static-setter': [PASS, FAIL_SLOPPY],
- 'language/statements/class/syntax/class-body-has-direct-super-class-heritage': [PASS, FAIL_SLOPPY],
- 'language/statements/class/syntax/class-body-method-definition-super-property': [PASS, FAIL_SLOPPY],
- 'language/statements/class/syntax/class-declaration-binding-identifier-class-element-list': [PASS, FAIL_SLOPPY],
- 'language/statements/class/syntax/class-declaration-computed-method-definition': [PASS, FAIL_SLOPPY],
- 'language/statements/class/syntax/class-declaration-computed-method-generator-definition': [PASS, FAIL_SLOPPY],
- 'language/statements/class/syntax/class-declaration-heritage-identifier-reference-class-element-list': [PASS, FAIL_SLOPPY],
- 'language/statements/class/syntax/class-expression-binding-identifier-opt-class-element-list': [PASS, FAIL_SLOPPY],
- 'language/statements/class/syntax/class-expression-heritage-identifier-reference': [PASS, FAIL_SLOPPY],
- 'language/statements/class/syntax/class-expression': [PASS, FAIL_SLOPPY],
- 'language/statements/class/syntax/class-method-propname-constructor': [PASS, FAIL_SLOPPY],
- 'language/statements/class/syntax/early-errors/class-body-constructor-empty-missing-class-heritage': [PASS, FAIL_SLOPPY],
'language/statements/const/block-local-closure-get-before-initialization': [PASS, FAIL_SLOPPY],
'language/statements/const/block-local-use-before-initialization-in-declaration-statement': [PASS, FAIL_SLOPPY],
'language/statements/const/block-local-use-before-initialization-in-prior-statement': [PASS, FAIL_SLOPPY],
@@ -222,22 +120,22 @@
'language/statements/const/syntax/block-scope-syntax-const-declarations-mixed-with-without-initialiser': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/block-scope-syntax-const-declarations-mixed-without-with-initialiser': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/block-scope-syntax-const-declarations-without-initialiser': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/const': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/const-invalid-assignment-statement-body-for-in': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/const-invalid-assignment-statement-body-for-of': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/const-outer-inner-let-bindings': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/const': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/with-initializer-do-statement-while-expression': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/with-initializer-for-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/with-initializer-if-expression-statement-else-statement': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/with-initializer-if-expression-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/with-initializer-if-expression-statement-else-statement': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/with-initializer-label-statement': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/with-initializer-while-expression-statement': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/without-initializer-case-expression-statement-list': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/without-initializer-default-statement-list': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/without-initializer-do-statement-while-expression': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/without-initializer-for-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/without-initializer-if-expression-statement-else-statement': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/without-initializer-if-expression-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/without-initializer-if-expression-statement-else-statement': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/without-initializer-label-statement': [PASS, FAIL_SLOPPY],
'language/statements/const/syntax/without-initializer-while-expression-statement': [PASS, FAIL_SLOPPY],
'language/statements/continue/labeled-continue': [PASS, FAIL_SLOPPY],
@@ -247,19 +145,33 @@
'language/statements/continue/no-label-continue': [PASS, FAIL_SLOPPY],
'language/statements/continue/shadowing-loop-variable-in-same-scope-as-continue': [PASS, FAIL_SLOPPY],
'language/statements/continue/simple-and-labeled': [PASS, FAIL_SLOPPY],
+ 'language/statements/for-in/const-bound-names-fordecl-tdz-for-in': [PASS, FAIL_SLOPPY],
'language/statements/for-in/const-fresh-binding-per-iteration-for-in': [PASS, FAIL_SLOPPY],
+ 'language/statements/for-in/let-bound-names-fordecl-tdz-for-in': [PASS, FAIL_SLOPPY],
'language/statements/for-in/let-fresh-binding-per-iteration-for-in': [PASS, FAIL_SLOPPY],
+ 'language/statements/for-of/const-bound-names-fordecl-tdz-for-of': [PASS, FAIL_SLOPPY],
'language/statements/for-of/const-fresh-binding-per-iteration-for-of': [PASS, FAIL_SLOPPY],
+ 'language/statements/for-of/let-bound-names-fordecl-tdz-for-of': [PASS, FAIL_SLOPPY],
'language/statements/for-of/let-fresh-binding-per-iteration-for-of': [PASS, FAIL_SLOPPY],
'language/statements/for/const-fresh-binding-per-iteration-for': [PASS, FAIL_SLOPPY],
'language/statements/for/let-fresh-binding-per-iteration-for': [PASS, FAIL_SLOPPY],
+ 'language/statements/let/block-local-closure-get-before-initialization': [PASS, FAIL_SLOPPY],
+ 'language/statements/let/block-local-closure-set-before-initialization': [PASS, FAIL_SLOPPY],
+ 'language/statements/let/block-local-use-before-initialization-in-declaration-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/let/block-local-use-before-initialization-in-prior-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/let/function-local-closure-get-before-initialization': [PASS, FAIL_SLOPPY],
+ 'language/statements/let/function-local-closure-set-before-initialization': [PASS, FAIL_SLOPPY],
+ 'language/statements/let/function-local-use-before-initialization-in-declaration-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/let/function-local-use-before-initialization-in-prior-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/let/global-closure-get-before-initialization': [PASS, FAIL_SLOPPY],
+ 'language/statements/let/global-closure-set-before-initialization': [PASS, FAIL_SLOPPY],
+ 'language/statements/let/syntax/let': [PASS, FAIL_SLOPPY],
'language/statements/let/syntax/let-closure-inside-condition': [PASS, FAIL_SLOPPY],
'language/statements/let/syntax/let-closure-inside-initialization': [PASS, FAIL_SLOPPY],
'language/statements/let/syntax/let-closure-inside-next-expression': [PASS, FAIL_SLOPPY],
'language/statements/let/syntax/let-iteration-variable-is-freshly-allocated-for-each-iteration-multi-let-binding': [PASS, FAIL_SLOPPY],
'language/statements/let/syntax/let-iteration-variable-is-freshly-allocated-for-each-iteration-single-let-binding': [PASS, FAIL_SLOPPY],
'language/statements/let/syntax/let-outer-inner-let-bindings': [PASS, FAIL_SLOPPY],
- 'language/statements/let/syntax/let': [PASS, FAIL_SLOPPY],
'language/statements/let/syntax/with-initialisers-in-statement-positions-case-expression-statement-list': [PASS, FAIL_SLOPPY],
'language/statements/let/syntax/with-initialisers-in-statement-positions-default-statement-list': [PASS, FAIL_SLOPPY],
'language/statements/let/syntax/without-initialisers-in-statement-positions-case-expression-statement-list': [PASS, FAIL_SLOPPY],
@@ -271,6 +183,12 @@
# Number/Boolean.prototype is a plain object in ES6
# https://code.google.com/p/v8/issues/detail?id=4001
+ 'built-ins/Boolean/prototype/S15.6.3.1_A1': [FAIL],
+ 'built-ins/Boolean/prototype/S15.6.4_A1': [FAIL],
+ 'built-ins/Boolean/prototype/toString/S15.6.4.2_A1_T1': [FAIL],
+ 'built-ins/Boolean/prototype/toString/S15.6.4.2_A1_T2': [FAIL],
+ 'built-ins/Boolean/prototype/valueOf/S15.6.4.3_A1_T1': [FAIL],
+ 'built-ins/Boolean/prototype/valueOf/S15.6.4.3_A1_T2': [FAIL],
'built-ins/Number/15.7.4-1': [FAIL],
'built-ins/Number/prototype/S15.7.3.1_A2_*': [FAIL],
'built-ins/Number/prototype/S15.7.3.1_A3': [FAIL],
@@ -279,15 +197,6 @@
'built-ins/Number/prototype/toString/S15.7.4.2_A1_*': [FAIL],
'built-ins/Number/prototype/toString/S15.7.4.2_A2_*': [FAIL],
'built-ins/Number/prototype/valueOf/S15.7.4.4_A1_*': [FAIL],
- 'built-ins/Boolean/prototype/S15.6.3.1_A1': [FAIL],
- 'built-ins/Boolean/prototype/S15.6.4_A1': [FAIL],
- 'built-ins/Boolean/prototype/toString/S15.6.4.2_A1_T1': [FAIL],
- 'built-ins/Boolean/prototype/toString/S15.6.4.2_A1_T2': [FAIL],
- 'built-ins/Boolean/prototype/valueOf/S15.6.4.3_A1_T1': [FAIL],
- 'built-ins/Boolean/prototype/valueOf/S15.6.4.3_A1_T2': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4118
- 'built-ins/Object/getOwnPropertyNames/15.2.3.4-4-44': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=3087
'built-ins/Array/prototype/every/15.4.4.16-3-12': [FAIL],
@@ -362,21 +271,30 @@
'language/expressions/object/prop-def-id-eval-error-2': [FAIL],
'language/statements/for-of/iterator-as-proxy': [FAIL],
'language/statements/for-of/iterator-next-result-type': [FAIL],
+ 'built-ins/Array/of/return-abrupt-from-data-property-using-proxy': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=4093
'built-ins/Array/symbol-species': [FAIL],
+ 'built-ins/Array/symbol-species-name': [FAIL],
'built-ins/ArrayBuffer/symbol-species': [FAIL],
+ 'built-ins/ArrayBuffer/symbol-species-name': [FAIL],
'built-ins/Map/symbol-species': [FAIL],
+ 'built-ins/Map/symbol-species-name': [FAIL],
+ 'built-ins/Promise/Symbol.species/prop-desc': [FAIL],
+ 'built-ins/Promise/Symbol.species/return-value': [FAIL],
+ 'built-ins/Promise/all/species-get-error': [FAIL],
+ 'built-ins/Promise/prototype/then/ctor-custom': [FAIL],
+ 'built-ins/Promise/race/species-get-error': [FAIL],
'built-ins/Promise/symbol-species': [FAIL],
+ 'built-ins/Promise/symbol-species-name': [FAIL],
'built-ins/RegExp/symbol-species': [FAIL],
+ 'built-ins/RegExp/symbol-species-name': [FAIL],
'built-ins/Set/symbol-species': [FAIL],
+ 'built-ins/Set/symbol-species-name': [FAIL],
'built-ins/Symbol/species/basic': [FAIL],
'built-ins/Symbol/species/builtin-getter-name': [FAIL],
'built-ins/Symbol/species/subclassing': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=4242
- 'built-ins/Date/15.9.1.15-1': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=4004
'built-ins/Date/prototype/setFullYear/15.9.5.40_1': [FAIL],
@@ -387,7 +305,18 @@
'built-ins/GeneratorPrototype/next/context-constructor-invocation': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=3566
+ 'built-ins/Array/from/iter-map-fn-err': [FAIL],
+ 'built-ins/Array/from/iter-set-elem-prop-err': [FAIL],
+ 'built-ins/Map/iterator-close-after-set-failure': [FAIL],
+ 'built-ins/Map/iterator-item-first-entry-returns-abrupt': [FAIL],
+ 'built-ins/Map/iterator-item-second-entry-returns-abrupt': [FAIL],
+ 'built-ins/Map/iterator-items-are-not-object-close-iterator': [FAIL],
+ 'built-ins/Promise/all/iter-close': [FAIL],
'built-ins/Set/set-iterator-close-after-add-failure': [FAIL],
+ 'built-ins/WeakMap/iterator-close-after-set-failure': [FAIL],
+ 'built-ins/WeakMap/iterator-item-first-entry-returns-abrupt': [FAIL],
+ 'built-ins/WeakMap/iterator-item-second-entry-returns-abrupt': [FAIL],
+ 'built-ins/WeakMap/iterator-items-are-not-object-close-iterator': [FAIL],
'built-ins/WeakSet/iterator-close-after-add-failure': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=3715
@@ -414,6 +343,9 @@
'built-ins/Promise/race/S25.4.4.3_A3.1_T2': [FAIL],
'built-ins/Promise/reject/S25.4.4.4_A3.1_T1': [FAIL],
+ # https://code.google.com/p/v8/issues/detail?id=4341
+ 'built-ins/Promise/resolve/arg-uniq-ctor': [FAIL],
+
# https://code.google.com/p/v8/issues/detail?id=4119
'built-ins/RegExp/15.10.4.1-1': [FAIL],
'built-ins/RegExp/S15.10.3.1_A2_T1': [FAIL],
@@ -426,6 +358,7 @@
# https://code.google.com/p/v8/issues/detail?id=4244
'built-ins/RegExp/prototype/exec/S15.10.6.2_A5_T3': [FAIL],
+ 'built-ins/RegExp/prototype/test/S15.10.6.3_A1_T22': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=4006
'built-ins/String/prototype/S15.5.4_A1': [FAIL],
@@ -436,8 +369,8 @@
# https://code.google.com/p/v8/issues/detail?id=4245
'built-ins/String/prototype/split/S15.5.4.14_A2_T37': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=3088
- 'built-ins/Symbol/auto-boxing-strict': [FAIL],
+ # https://code.google.com/p/v8/issues/detail?id=4348
+ 'built-ins/String/prototype/Symbol.iterator/this-val-non-obj-coercible': [FAIL],
# The order of adding the name property is wrong
# https://code.google.com/p/v8/issues/detail?id=4199
@@ -445,10 +378,8 @@
'language/computed-property-names/class/static/method-symbol': [FAIL, FAIL_SLOPPY],
'language/computed-property-names/class/static/method-string': [FAIL, FAIL_SLOPPY],
- # new.target
- # https://code.google.com/p/v8/issues/detail?id=3887
- 'language/expressions/arrow-function/lexical-new.target': [FAIL],
- 'language/expressions/arrow-function/lexical-new.target-closure-returned': [FAIL],
+ # This should work as soon as rest parameters are re-implemented via desaguring.
+ 'language/expressions/arrow-function/syntax/early-errors/arrowparameters-cover-no-duplicates-rest': [PASS, FAIL],
# https://code.google.com/p/v8/issues/detail?id=2160
'language/expressions/arrow-function/syntax/arrowparameters-cover-initialize-1': [FAIL],
@@ -479,6 +410,13 @@
# We do not expose Array.prototype.values
# https://code.google.com/p/v8/issues/detail?id=4247
'built-ins/Array/prototype/Symbol.iterator': [FAIL],
+ 'built-ins/Array/prototype/values/returns-iterator': [FAIL],
+ 'built-ins/Array/prototype/values/returns-iterator-from-object': [FAIL],
+ 'built-ins/Array/prototype/values/prop-desc': [FAIL],
+ 'built-ins/Array/prototype/values/name': [FAIL],
+ 'built-ins/Array/prototype/values/length': [FAIL],
+ 'built-ins/Array/prototype/values/iteration': [FAIL],
+ 'built-ins/Array/prototype/values/iteration-mutable': [FAIL],
#https://code.google.com/p/v8/issues/detail?id=3983
'language/expressions/generators/yield-as-function-expression-binding-identifier': [FAIL],
@@ -505,132 +443,8 @@
'built-ins/GeneratorPrototype/return/try-finally-within-finally': [FAIL],
'built-ins/GeneratorPrototype/return/try-finally-within-try': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=4177
- 'language/arguments-object/mapped/mapped-arguments-nonconfigurable-2': [FAIL],
- 'language/arguments-object/mapped/mapped-arguments-nonconfigurable-3': [FAIL],
- 'language/arguments-object/mapped/mapped-arguments-nonconfigurable-4': [FAIL],
- 'language/arguments-object/mapped/mapped-arguments-nonconfigurable-delete-2': [FAIL],
- 'language/arguments-object/mapped/mapped-arguments-nonconfigurable-delete-3': [FAIL],
- 'language/arguments-object/mapped/mapped-arguments-nonconfigurable-delete-4': [FAIL],
- 'language/arguments-object/mapped/mapped-arguments-nonconfigurable-nonwritable-3': [FAIL],
- 'language/arguments-object/mapped/mapped-arguments-nonconfigurable-nonwritable-4': [FAIL],
- 'language/arguments-object/mapped/mapped-arguments-nonconfigurable-nonwritable-5': [FAIL],
- 'language/arguments-object/mapped/mapped-arguments-nonconfigurable-strict-delete-2': [FAIL],
- 'language/arguments-object/mapped/mapped-arguments-nonconfigurable-strict-delete-3': [FAIL],
- 'language/arguments-object/mapped/mapped-arguments-nonconfigurable-strict-delete-4': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=811
- 'language/expressions/assignment/destructuring/array-elem-elision': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-init-assignment': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-init-evaluation': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-init-in': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-init-let': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-init-order': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-init-simple-no-strict': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-init-yield-expr': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-init-yield-ident-valid': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-array': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-array-null': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-array-undefined': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-array-undefined-hole': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-array-undefined-own': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-array-yield-expr': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-array-yield-ident-valid': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-obj': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-obj-null': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-obj-undefined': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-obj-undefined-hole': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-obj-undefined-own': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-obj-yield-expr': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-nested-obj-yield-ident-valid': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-put-const': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-put-let': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-put-prop-ref': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-put-prop-ref-no-get': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-put-prop-ref-user-err': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-put-unresolvable-no-strict': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-put-unresolvable-strict': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-target-identifier': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-target-simple-no-strict': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-target-yield-expr': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-target-yield-valid': [FAIL],
- 'language/expressions/assignment/destructuring/array-empty': [FAIL],
- 'language/expressions/assignment/destructuring/array-iteration': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-after-element': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-after-elision': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-elision': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-iteration': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-array': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-array-null': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-array-undefined': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-array-undefined-hole': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-array-undefined-own': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-array-yield-expr': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-array-yield-ident-valid': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-obj': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-obj-null': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-obj-undefined': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-obj-undefined-hole': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-obj-undefined-own': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-obj-yield-expr': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-nested-obj-yield-ident-valid': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-put-const': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-put-let': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-put-prop-ref': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-put-prop-ref-no-get': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-put-prop-ref-user-err': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-put-unresolvable-no-strict': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-put-unresolvable-strict': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-yield-expr': [FAIL],
- 'language/expressions/assignment/destructuring/array-rest-yield-ident-valid': [FAIL],
- 'language/expressions/assignment/destructuring/array-sparse': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-identifier-resolution': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-identifier-yield-ident-valid': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-init-assignment': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-init-evaluation': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-init-in': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-init-let': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-init-order': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-init-simple-no-strict': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-init-yield-expr': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-init-yield-ident-valid': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-put-const': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-put-let': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-put-unresolvable-no-strict': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-put-unresolvable-strict': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-simple-no-strict': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-elem-init-assignment': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-elem-init-evaluation': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-elem-init-in': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-elem-init-let': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-elem-init-yield-expr': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-elem-init-yield-ident-valid': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-elem-target-yield-expr': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-elem-target-yield-ident-valid': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-identifier-resolution': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-name-evaluation': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-name-evaluation-error': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-nested-array': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-nested-array-null': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-nested-array-undefined': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-nested-array-undefined-own': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-nested-array-yield-expr': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-nested-array-yield-ident-valid': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-nested-obj': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-nested-obj-null': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-nested-obj-undefined': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-nested-obj-undefined-own': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-nested-obj-yield-expr': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-nested-obj-yield-ident-valid': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-put-const': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-put-let': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-put-order': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-put-prop-ref': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-put-prop-ref-no-get': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-put-prop-ref-user-err': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-put-unresolvable-no-strict': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-put-unresolvable-strict': [FAIL],
- 'language/expressions/assignment/destructuring/object-empty': [FAIL],
+ 'language/expressions/assignment/destructuring/*': [SKIP],
# https://code.google.com/p/v8/issues/detail?id=4248
'language/expressions/compound-assignment/S11.13.2_A5.*': [FAIL],
@@ -661,8 +475,55 @@
'language/expressions/assignment/S11.13.1_A6*': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=3699
+ 'built-ins/Function/instance-name': [FAIL],
+ 'built-ins/GeneratorFunction/instance-name': [FAIL],
+ 'language/expressions/assignment/fn-name-arrow': [FAIL],
+ 'language/expressions/assignment/fn-name-class': [FAIL],
+ 'language/expressions/assignment/fn-name-cover': [FAIL],
+ 'language/expressions/assignment/fn-name-fn': [FAIL],
+ 'language/expressions/assignment/fn-name-gen': [FAIL],
+ 'language/expressions/assignment/fn-name-lhs-cover': [FAIL],
+ 'language/expressions/assignment/fn-name-lhs-member': [FAIL],
+ 'language/expressions/class/name': [FAIL],
+ 'language/expressions/function/name': [FAIL],
'language/expressions/generators/implicit-name': [FAIL],
+ 'language/expressions/generators/name': [FAIL],
'language/expressions/generators/name-property-descriptor': [FAIL],
+ 'language/expressions/object/fn-name-accessor-get': [FAIL],
+ 'language/expressions/object/fn-name-accessor-set': [FAIL],
+ 'language/expressions/object/fn-name-arrow': [FAIL],
+ 'language/expressions/object/fn-name-class': [FAIL],
+ 'language/expressions/object/fn-name-cover': [FAIL],
+ 'language/expressions/object/fn-name-fn': [FAIL],
+ 'language/expressions/object/fn-name-gen': [FAIL],
+ 'language/expressions/object/fn-name-lhs-cover': [FAIL],
+ 'language/expressions/object/fn-name-lhs-member': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-accessor-get': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-accessor-set': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-arrow': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-class': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-cover': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-fn': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-gen': [FAIL],
+ 'language/statements/class/definition/fn-name-accessor-get': [FAIL],
+ 'language/statements/class/definition/fn-name-accessor-set': [FAIL],
+ 'language/statements/class/definition/fn-name-gen-method': [FAIL],
+ 'language/statements/class/definition/fn-name-method': [FAIL],
+ 'language/statements/const/fn-name-arrow': [FAIL],
+ 'language/statements/const/fn-name-class': [FAIL],
+ 'language/statements/const/fn-name-cover': [FAIL],
+ 'language/statements/const/fn-name-fn': [FAIL],
+ 'language/statements/const/fn-name-gen': [FAIL],
+ 'language/statements/let/fn-name-arrow': [FAIL],
+ 'language/statements/let/fn-name-class': [FAIL],
+ 'language/statements/let/fn-name-cover': [FAIL],
+ 'language/statements/let/fn-name-fn': [FAIL],
+ 'language/statements/let/fn-name-gen': [FAIL],
+ 'language/statements/variable/fn-name-arrow': [FAIL],
+ 'language/statements/variable/fn-name-class': [FAIL],
+ 'language/statements/variable/fn-name-cover': [FAIL],
+ 'language/statements/variable/fn-name-fn': [FAIL],
+ 'language/statements/variable/fn-name-gen': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=4251
'language/expressions/postfix-increment/S11.3.1_A5_T1': [FAIL],
@@ -681,58 +542,191 @@
'language/expressions/object/method-definition/generator-name-prop-symbol': [FAIL],
'language/expressions/object/method-definition/name-name-prop-symbol': [FAIL],
+ # https://code.google.com/p/v8/issues/detail?id=4317
+ 'built-ins/Array/prototype/concat/is-concat-spreadable-val-falsey': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=2952
+ 'built-ins/RegExp/prototype/exec/u-lastindex-adv': [FAIL],
+ 'built-ins/RegExp/prototype/exec/u-captured-value': [FAIL],
+ 'built-ins/RegExp/prototype/exec/u-lastindex-value': [FAIL],
+ 'built-ins/RegExp/prototype/test/u-captured-value': [FAIL],
+ 'built-ins/RegExp/prototype/test/u-lastindex-adv': [FAIL],
+ 'built-ins/RegExp/prototype/test/u-lastindex-value': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/length': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/name': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/prop-desc': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/this-invald-obj': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/this-non-obj': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/this-regexp': [FAIL],
+ 'built-ins/RegExp/unicode_identity_escape': [FAIL],
+ 'language/literals/regexp/u-unicode-esc': [FAIL],
+ 'language/literals/regexp/u-surrogate-pairs': [FAIL],
+ 'language/literals/regexp/u-case-mapping': [FAIL],
+ 'language/literals/regexp/u-astral': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4342
+ 'built-ins/RegExp/prototype/exec/get-sticky-coerce': [FAIL],
+ 'built-ins/RegExp/prototype/exec/get-sticky-err': [FAIL],
+ 'built-ins/RegExp/prototype/exec/y-fail-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/exec/y-fail-lastindex-no-write': [FAIL],
+ 'built-ins/RegExp/prototype/exec/y-fail-return': [FAIL],
+ 'built-ins/RegExp/prototype/exec/y-fail-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/exec/y-init-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/exec/y-set-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/sticky/prop-desc': [FAIL],
+ 'built-ins/RegExp/prototype/sticky/this-invalid-obj': [FAIL],
+ 'built-ins/RegExp/prototype/sticky/this-non-obj': [FAIL],
+ 'built-ins/RegExp/prototype/sticky/this-regexp': [FAIL],
+ 'built-ins/RegExp/prototype/test/get-sticky-coerce': [FAIL],
+ 'built-ins/RegExp/prototype/test/get-sticky-err': [FAIL],
+ 'built-ins/RegExp/prototype/test/y-fail-lastindex-no-write': [FAIL],
+ 'built-ins/RegExp/prototype/test/y-fail-return': [FAIL],
+ 'built-ins/RegExp/prototype/test/y-fail-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/test/y-init-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/test/y-set-lastindex': [FAIL],
+ 'built-ins/RegExp/valid-flags-y': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4305
+ 'built-ins/RegExp/prototype/Symbol.match/*': [FAIL],
+ 'built-ins/String/prototype/endsWith/return-abrupt-from-searchstring-regexp-test': [FAIL],
+ 'built-ins/String/prototype/includes/return-abrupt-from-searchstring-regexp-test': [FAIL],
+ 'built-ins/String/prototype/startsWith/return-abrupt-from-searchstring-regexp-test': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4343
+ 'built-ins/RegExp/prototype/Symbol.replace/*': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4344
+ 'built-ins/RegExp/prototype/Symbol.search/*': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4345
+ 'built-ins/RegExp/prototype/Symbol.split/*': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4346
+ 'built-ins/RegExp/prototype/flags/*': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4347
+ 'built-ins/RegExp/prototype/global/name': [FAIL],
+ 'built-ins/RegExp/prototype/ignoreCase/name': [FAIL],
+ 'built-ins/RegExp/prototype/multiline/name': [FAIL],
+ 'built-ins/RegExp/prototype/source/name': [FAIL],
+ 'built-ins/RegExp/prototype/sticky/name': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4360
+ 'intl402/Collator/10.1.1_1': [FAIL],
+ 'intl402/DateTimeFormat/12.1.1_1': [FAIL],
+ 'intl402/NumberFormat/11.1.1_1': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4361
+ 'intl402/Collator/10.1.1_a': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=1972
+ 'language/identifiers/val-break-via-escape-hex': [FAIL],
+ 'language/identifiers/val-break-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-case-via-escape-hex': [FAIL],
+ 'language/identifiers/val-case-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-catch-via-escape-hex': [FAIL],
+ 'language/identifiers/val-catch-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-class-via-escape-hex': [FAIL],
+ 'language/identifiers/val-class-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-const-via-escape-hex': [FAIL],
+ 'language/identifiers/val-const-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-continue-via-escape-hex': [FAIL],
+ 'language/identifiers/val-continue-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-debugger-via-escape-hex': [FAIL],
+ 'language/identifiers/val-debugger-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-default-via-escape-hex': [FAIL],
+ 'language/identifiers/val-default-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-delete-via-escape-hex': [FAIL],
+ 'language/identifiers/val-delete-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-do-via-escape-hex': [FAIL],
+ 'language/identifiers/val-do-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-else-via-escape-hex': [FAIL],
+ 'language/identifiers/val-else-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-enum-via-escape-hex': [FAIL],
+ 'language/identifiers/val-enum-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-export-via-escape-hex': [FAIL],
+ 'language/identifiers/val-export-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-extends-via-escape-hex': [FAIL],
+ 'language/identifiers/val-extends-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-false-via-escape-hex': [FAIL],
+ 'language/identifiers/val-false-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-finally-via-escape-hex': [FAIL],
+ 'language/identifiers/val-finally-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-for-via-escape-hex': [FAIL],
+ 'language/identifiers/val-for-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-function-via-escape-hex': [FAIL],
+ 'language/identifiers/val-function-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-if-via-escape-hex': [FAIL],
+ 'language/identifiers/val-if-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-import-via-escape-hex': [FAIL],
+ 'language/identifiers/val-import-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-in-via-escape-hex': [FAIL],
+ 'language/identifiers/val-in-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-instanceof-via-escape-hex': [FAIL],
+ 'language/identifiers/val-instanceof-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-new-via-escape-hex': [FAIL],
+ 'language/identifiers/val-new-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-null-via-escape-hex': [FAIL],
+ 'language/identifiers/val-null-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-return-via-escape-hex': [FAIL],
+ 'language/identifiers/val-return-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-super-via-escape-hex': [FAIL],
+ 'language/identifiers/val-super-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-switch-via-escape-hex': [FAIL],
+ 'language/identifiers/val-switch-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-throw-via-escape-hex': [FAIL],
+ 'language/identifiers/val-throw-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-true-via-escape-hex': [FAIL],
+ 'language/identifiers/val-true-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-try-via-escape-hex': [FAIL],
+ 'language/identifiers/val-try-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-typeof-via-escape-hex': [FAIL],
+ 'language/identifiers/val-typeof-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-var-via-escape-hex': [FAIL],
+ 'language/identifiers/val-var-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-void-via-escape-hex': [FAIL],
+ 'language/identifiers/val-void-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-while-via-escape-hex': [FAIL],
+ 'language/identifiers/val-while-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-with-via-escape-hex': [FAIL],
+ 'language/identifiers/val-with-via-escape-hex4': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4362
+ 'built-ins/String/prototype/repeat/empty-string-returns-empty': [PASS, FAIL],
+
######################## NEEDS INVESTIGATION ###########################
# These test failures are specific to the intl402 suite and need investigation
# to be either marked as bugs with issues filed for them or as deliberate
# incompatibilities if the test cases turn out to be broken or ambiguous.
+ # Some of these are related to v8:4361 in being visible side effects from Intl.
'intl402/6.2.3': [FAIL],
'intl402/9.2.1_2': [FAIL],
'intl402/9.2.6_2': [FAIL],
- 'intl402/10.1.1_a': [FAIL],
- 'intl402/10.1.2.1_4': [FAIL],
- 'intl402/10.1.2_a': [PASS, FAIL],
- 'intl402/10.2.3_b': [PASS, FAIL],
- 'intl402/10.3.2_1_c': [PASS, FAIL],
- 'intl402/10.3.2_CS_b_NN': [PASS, FAIL],
- 'intl402/10.3.2_CS_c_NN': [PASS, FAIL],
- 'intl402/10.3.2_CS_d_NN': [PASS, FAIL],
- 'intl402/10.3_a': [FAIL],
- 'intl402/11.1.1_20_c': [FAIL],
- 'intl402/11.1.1_a': [FAIL],
- 'intl402/11.1.2': [PASS, FAIL],
- 'intl402/11.1.2.1_4': [FAIL],
- 'intl402/11.3_a': [FAIL],
- 'intl402/12.1.1_a': [FAIL],
- 'intl402/12.1.2': [PASS, FAIL],
- 'intl402/12.1.2.1_4': [FAIL],
- 'intl402/12.3.2_FDT_7_a_iv': [FAIL],
- 'intl402/12.3.3': [FAIL],
- 'intl402/12.3_a': [FAIL],
- 'intl402/13.1.1_7': [PASS, FAIL],
- 'intl402/13.2.1_5': [PASS, FAIL],
- 'intl402/13.3.0_7': [PASS, FAIL],
-
- # These tests fail in nosnap in strict mode
- # https://code.google.com/p/v8/issues/detail?id=4198
- 'built-ins/String/S15.5.1.1_A1_T6': [PASS, FAIL_OK],
- 'built-ins/eval/S15.1.2.1_A1.1_T1': [PASS, FAIL_OK],
- 'built-ins/eval/S15.1.2.1_A1.1_T2': [PASS, FAIL_OK],
- 'built-ins/eval/S15.1.2.1_A4.3': [PASS, FAIL_OK],
- 'built-ins/eval/S15.1.2.1_A4.4': [PASS, FAIL_OK],
- 'language/eval-code/10.4.2-1-1': [PASS, FAIL_OK],
- 'language/eval-code/10.4.2-1-2': [PASS, FAIL_OK],
- 'language/eval-code/10.4.2-1-3': [PASS, FAIL_OK],
- 'language/eval-code/10.4.2-1-5': [PASS, FAIL_OK],
- 'language/eval-code/S10.4.2.1_A1': [PASS, FAIL_OK],
- 'language/function-code/10.4.3-1-19-s': [PASS, FAIL_OK],
- 'language/function-code/10.4.3-1-19gs': [PASS, FAIL_OK],
- 'language/function-code/10.4.3-1-20-s': [PASS, FAIL_OK],
- 'language/function-code/10.4.3-1-20gs': [PASS, FAIL_OK],
- 'language/statements/variable/12.2.1-10-s': [PASS, FAIL_OK],
- 'language/statements/variable/12.2.1-20-s': [PASS, FAIL_OK],
- 'language/statements/variable/12.2.1-21-s': [PASS, FAIL_OK],
- 'language/statements/variable/12.2.1-9-s': [PASS, FAIL_OK],
+ 'intl402/Collator/10.1.2.1_4': [FAIL],
+ 'intl402/Collator/10.1.2_a': [PASS, FAIL],
+ 'intl402/Collator/10.2.3_b': [PASS, FAIL],
+ 'intl402/Collator/prototype/10.3_a': [FAIL],
+ 'intl402/Date/prototype/13.3.0_7': [FAIL],
+ 'intl402/DateTimeFormat/12.1.1': [FAIL],
+ 'intl402/DateTimeFormat/12.1.1_a': [FAIL],
+ 'intl402/DateTimeFormat/12.1.1_1': [FAIL],
+ 'intl402/DateTimeFormat/12.1.2': [PASS, FAIL],
+ 'intl402/DateTimeFormat/12.1.2.1_4': [FAIL],
+ 'intl402/DateTimeFormat/12.2.3_b': [FAIL],
+ 'intl402/DateTimeFormat/prototype/12.3.2_FDT_7_a_iv': [FAIL],
+ 'intl402/DateTimeFormat/prototype/12.3.3': [FAIL],
+ 'intl402/DateTimeFormat/prototype/12.3_a': [FAIL],
+ 'intl402/DateTimeFormat/prototype/format/12.3.2_FDT_7_a_iv': [FAIL],
+ 'intl402/Number/prototype/toLocaleString/13.2.1_5': [PASS, FAIL],
+ 'intl402/NumberFormat/11.1.1_20_c': [FAIL],
+ 'intl402/NumberFormat/11.1.1_a': [FAIL],
+ 'intl402/NumberFormat/11.1.1': [FAIL],
+ 'intl402/NumberFormat/11.1.2': [PASS, FAIL],
+ 'intl402/NumberFormat/11.1.2.1_4': [FAIL],
+ 'intl402/NumberFormat/11.2.3_b': [FAIL],
+ 'intl402/NumberFormat/prototype/11.3_a': [FAIL],
+ 'intl402/String/prototype/localeCompare/13.1.1_7': [PASS, FAIL],
##################### DELIBERATE INCOMPATIBILITIES #####################
@@ -767,6 +761,13 @@
'built-ins/Object/keys/15.2.3.14-1-2': [PASS, FAIL_OK],
'built-ins/Object/keys/15.2.3.14-1-3': [PASS, FAIL_OK],
+ # Test bug https://github.com/tc39/test262/issues/405
+ 'intl402/Collator/prototype/compare/10.3.2_1_c': [PASS, FAIL_OK],
+ 'intl402/Collator/prototype/compare/10.3.2_CS_b_NN': [PASS, FAIL_OK],
+ 'intl402/Collator/prototype/compare/10.3.2_CS_c_NN': [PASS, FAIL_OK],
+ 'intl402/Collator/prototype/compare/10.3.2_CS_d_NN': [PASS, FAIL_OK],
+ 'intl402/Date/prototype/13.3.0_7': [PASS, FAIL_OK],
+
############################ SKIPPED TESTS #############################
# These tests take a looong time to run.
diff --git a/deps/v8/test/test262-es6/testcfg.py b/deps/v8/test/test262-es6/testcfg.py
index 91491b3907..88f4ad1297 100644
--- a/deps/v8/test/test262-es6/testcfg.py
+++ b/deps/v8/test/test262-es6/testcfg.py
@@ -39,8 +39,8 @@ from testrunner.local import utils
from testrunner.objects import testcase
# The revision hash needs to be 7 characters?
-TEST_262_ARCHIVE_REVISION = "c6ac390" # This is the 2015-07-06 revision.
-TEST_262_ARCHIVE_MD5 = "e1393ef330f38e9cb1bfa4e3eada5ba8"
+TEST_262_ARCHIVE_REVISION = "258d212" # This is the 2015-07-31 revision.
+TEST_262_ARCHIVE_MD5 = "a9b26e19ce582492642af973c8cee826"
TEST_262_URL = "https://github.com/tc39/test262/tarball/%s"
TEST_262_HARNESS_FILES = ["sta.js", "assert.js"]
@@ -48,6 +48,55 @@ TEST_262_SUITE_PATH = ["data", "test"]
TEST_262_HARNESS_PATH = ["data", "harness"]
TEST_262_TOOLS_PATH = ["data", "tools", "packaging"]
+ALL_VARIANT_FLAGS_STRICT = dict(
+ (v, [flags + ["--use-strict"] for flags in flag_sets])
+ for v, flag_sets in testsuite.ALL_VARIANT_FLAGS.iteritems()
+)
+
+FAST_VARIANT_FLAGS_STRICT = dict(
+ (v, [flags + ["--use-strict"] for flags in flag_sets])
+ for v, flag_sets in testsuite.FAST_VARIANT_FLAGS.iteritems()
+)
+
+ALL_VARIANT_FLAGS_BOTH = dict(
+ (v, [flags for flags in testsuite.ALL_VARIANT_FLAGS[v] +
+ ALL_VARIANT_FLAGS_STRICT[v]])
+ for v in testsuite.ALL_VARIANT_FLAGS
+)
+
+FAST_VARIANT_FLAGS_BOTH = dict(
+ (v, [flags for flags in testsuite.FAST_VARIANT_FLAGS[v] +
+ FAST_VARIANT_FLAGS_STRICT[v]])
+ for v in testsuite.FAST_VARIANT_FLAGS
+)
+
+ALL_VARIANTS = {
+ 'nostrict': testsuite.ALL_VARIANT_FLAGS,
+ 'strict': ALL_VARIANT_FLAGS_STRICT,
+ 'both': ALL_VARIANT_FLAGS_BOTH,
+}
+
+FAST_VARIANTS = {
+ 'nostrict': testsuite.FAST_VARIANT_FLAGS,
+ 'strict': FAST_VARIANT_FLAGS_STRICT,
+ 'both': FAST_VARIANT_FLAGS_BOTH,
+}
+
+class Test262VariantGenerator(testsuite.VariantGenerator):
+ def GetFlagSets(self, testcase, variant):
+ if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
+ variant_flags = FAST_VARIANTS
+ else:
+ variant_flags = ALL_VARIANTS
+
+ test_record = self.suite.GetTestRecord(testcase)
+ if "noStrict" in test_record:
+ return variant_flags["nostrict"][variant]
+ if "onlyStrict" in test_record:
+ return variant_flags["strict"][variant]
+ return variant_flags["both"][variant]
+
+
class Test262TestSuite(testsuite.TestSuite):
def __init__(self, name, root):
@@ -81,15 +130,8 @@ class Test262TestSuite(testsuite.TestSuite):
self.GetIncludesForTest(testcase) + ["--harmony"] +
[os.path.join(self.testroot, testcase.path + ".js")])
- def VariantFlags(self, testcase, default_flags):
- flags = super(Test262TestSuite, self).VariantFlags(testcase, default_flags)
- test_record = self.GetTestRecord(testcase)
- if "noStrict" in test_record:
- return flags
- strict_flags = [f + ["--use-strict"] for f in flags]
- if "onlyStrict" in test_record:
- return strict_flags
- return flags + strict_flags
+ def _VariantGeneratorFactory(self):
+ return Test262VariantGenerator
def LoadParseTestRecord(self):
if not self.ParseTestRecord:
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index feed1a3206..b9ef3c68f4 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -281,6 +281,9 @@
'15.2.3.13-1-3': [FAIL],
'15.2.3.13-1-4': [FAIL],
+ # ES6 says for dates to default to the local timezone if none is specified
+ '15.9.1.15-1': [FAIL],
+
######################## NEEDS INVESTIGATION ###########################
# These test failures are specific to the intl402 suite and need investigation
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 7e67b31616..71c2d44d2f 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -2421,6 +2421,40 @@ TEST_F(InstructionSelectorTest, Word32EqualWithSignedExtendHalfword) {
}
+TEST_F(InstructionSelectorTest, Word32EqualZeroWithWord32Equal) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ m.Return(m.Word32Equal(m.Word32Equal(p0, p1), m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ m.Return(m.Word32Equal(m.Int32Constant(0), m.Word32Equal(p0, p1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Miscellaneous
diff --git a/deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc b/deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc
new file mode 100644
index 0000000000..ea9ebdb20b
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc
@@ -0,0 +1,309 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/coalesced-live-ranges.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// Utility offering shorthand syntax for building up a range by providing its ID
+// and pairs (start, end) specifying intervals. Circumvents current incomplete
+// support for C++ features such as instantiation lists, on OS X and Android.
+class TestRangeBuilder {
+ public:
+ explicit TestRangeBuilder(Zone* zone) : id_(-1), pairs_(), zone_(zone) {}
+
+ TestRangeBuilder& Id(int id) {
+ id_ = id;
+ return *this;
+ }
+ TestRangeBuilder& Add(int start, int end) {
+ pairs_.push_back({start, end});
+ return *this;
+ }
+
+ LiveRange* Build(int start, int end) { return Add(start, end).Build(); }
+
+ LiveRange* Build() {
+ LiveRange* range = new (zone_) LiveRange(id_, MachineType::kRepTagged);
+ // Traverse the provided interval specifications backwards, because that is
+ // what LiveRange expects.
+ for (int i = static_cast<int>(pairs_.size()) - 1; i >= 0; --i) {
+ Interval pair = pairs_[i];
+ LifetimePosition start = LifetimePosition::FromInt(pair.first);
+ LifetimePosition end = LifetimePosition::FromInt(pair.second);
+ CHECK(start < end);
+ range->AddUseInterval(start, end, zone_);
+ }
+
+ pairs_.clear();
+ return range;
+ }
+
+ private:
+ typedef std::pair<int, int> Interval;
+ typedef std::vector<Interval> IntervalList;
+ int id_;
+ IntervalList pairs_;
+ Zone* zone_;
+};
+
+
+class CoalescedLiveRangesTest : public TestWithZone {
+ public:
+ CoalescedLiveRangesTest() : TestWithZone(), ranges_(zone()) {}
+ bool HasNoConflicts(const LiveRange* range);
+ bool ConflictsPreciselyWith(const LiveRange* range, int id);
+ bool ConflictsPreciselyWith(const LiveRange* range, int id1, int id2);
+
+ CoalescedLiveRanges& ranges() { return ranges_; }
+ const CoalescedLiveRanges& ranges() const { return ranges_; }
+ bool AllocationsAreValid() const;
+ void RemoveConflicts(LiveRange* range);
+
+ private:
+ typedef ZoneSet<int> LiveRangeIDs;
+ bool IsRangeConflictingWith(const LiveRange* range, const LiveRangeIDs& ids);
+ CoalescedLiveRanges ranges_;
+};
+
+
+bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
+ int id) {
+ LiveRangeIDs set(zone());
+ set.insert(id);
+ return IsRangeConflictingWith(range, set);
+}
+
+
+bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
+ int id1, int id2) {
+ LiveRangeIDs set(zone());
+ set.insert(id1);
+ set.insert(id2);
+ return IsRangeConflictingWith(range, set);
+}
+
+
+bool CoalescedLiveRangesTest::HasNoConflicts(const LiveRange* range) {
+ LiveRangeIDs set(zone());
+ return IsRangeConflictingWith(range, set);
+}
+
+
+void CoalescedLiveRangesTest::RemoveConflicts(LiveRange* range) {
+ auto conflicts = ranges().GetConflicts(range);
+ LiveRangeIDs seen(zone());
+ for (auto c = conflicts.Current(); c != nullptr;
+ c = conflicts.RemoveCurrentAndGetNext()) {
+ EXPECT_FALSE(seen.count(c->id()) > 0);
+ seen.insert(c->id());
+ }
+}
+
+
+bool CoalescedLiveRangesTest::AllocationsAreValid() const {
+ return ranges().VerifyAllocationsAreValidForTesting();
+}
+
+
+bool CoalescedLiveRangesTest::IsRangeConflictingWith(const LiveRange* range,
+ const LiveRangeIDs& ids) {
+ LiveRangeIDs found_ids(zone());
+
+ auto conflicts = ranges().GetConflicts(range);
+ for (auto conflict = conflicts.Current(); conflict != nullptr;
+ conflict = conflicts.GetNext()) {
+ found_ids.insert(conflict->id());
+ }
+ return found_ids == ids;
+}
+
+
+TEST_F(CoalescedLiveRangesTest, VisitEmptyAllocations) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
+ ASSERT_TRUE(ranges().empty());
+ ASSERT_TRUE(AllocationsAreValid());
+ ASSERT_TRUE(HasNoConflicts(range));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterAllocations) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(5, 6);
+ ranges().AllocateRange(range);
+ ASSERT_FALSE(ranges().empty());
+ ASSERT_TRUE(AllocationsAreValid());
+ LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 2);
+ ASSERT_TRUE(HasNoConflicts(query));
+ query = TestRangeBuilder(zone()).Id(3).Build(1, 5);
+ ASSERT_TRUE(HasNoConflicts(query));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterManyAllocations) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(5, 7).Add(10, 12).Build();
+ ranges().AllocateRange(range);
+ ASSERT_FALSE(ranges().empty());
+ ASSERT_TRUE(AllocationsAreValid());
+ LiveRange* query =
+ TestRangeBuilder(zone()).Id(2).Add(1, 2).Add(13, 15).Build();
+ ASSERT_TRUE(HasNoConflicts(query));
+ query = TestRangeBuilder(zone()).Id(3).Add(1, 5).Add(12, 15).Build();
+ ASSERT_TRUE(HasNoConflicts(query));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, SelfConflictsPreciselyWithSelf) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
+ ranges().AllocateRange(range);
+ ASSERT_FALSE(ranges().empty());
+ ASSERT_TRUE(AllocationsAreValid());
+ ASSERT_TRUE(ConflictsPreciselyWith(range, 1));
+ range = TestRangeBuilder(zone()).Id(2).Build(8, 10);
+ ranges().AllocateRange(range);
+ ASSERT_TRUE(ConflictsPreciselyWith(range, 2));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryStartsBeforeConflict) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 3);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
+ range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
+ ranges().AllocateRange(range);
+ query = TestRangeBuilder(zone()).Id(4).Build(6, 9);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryStartsInConflict) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(3, 6);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
+ range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
+ ranges().AllocateRange(range);
+ query = TestRangeBuilder(zone()).Id(4).Build(9, 11);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryContainedInConflict) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 3);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryContainsConflict) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 3);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 5);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsSameRange) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(7, 9).Add(20, 25).Build();
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 8);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsDifferentRanges) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(20, 25).Build();
+ ranges().AllocateRange(range);
+ range = TestRangeBuilder(zone()).Id(2).Build(7, 10);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(2, 22);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1, 2));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryFitsInGaps) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 15).Add(20, 25).Build();
+ ranges().AllocateRange(range);
+ LiveRange* query =
+ TestRangeBuilder(zone()).Id(3).Add(5, 10).Add(16, 19).Add(27, 30).Build();
+ ASSERT_TRUE(HasNoConflicts(query));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, DeleteConflictBefore) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Add(1, 4).Add(5, 6).Build();
+ ranges().AllocateRange(range);
+ range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(3, 7);
+ RemoveConflicts(query);
+ query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, DeleteConflictAfter) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
+ ranges().AllocateRange(range);
+ range = TestRangeBuilder(zone()).Id(2).Add(40, 50).Add(60, 70).Build();
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(45, 60);
+ RemoveConflicts(query);
+ query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, DeleteConflictStraddle) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 20).Build();
+ ranges().AllocateRange(range);
+ range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
+ RemoveConflicts(query);
+ query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, DeleteConflictManyOverlapsBefore) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(10, 20).Build();
+ ranges().AllocateRange(range);
+ range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
+ RemoveConflicts(query);
+ query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, DeleteWhenConflictRepeatsAfterNonConflict) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(20, 30).Build();
+ ranges().AllocateRange(range);
+ range = TestRangeBuilder(zone()).Id(2).Build(12, 15);
+ ranges().AllocateRange(range);
+ LiveRange* query =
+ TestRangeBuilder(zone()).Id(3).Add(1, 8).Add(22, 25).Build();
+ RemoveConflicts(query);
+ query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/compiler-test-utils.h b/deps/v8/test/unittests/compiler/compiler-test-utils.h
index 6ce28f9f94..7873c961b1 100644
--- a/deps/v8/test/unittests/compiler/compiler-test-utils.h
+++ b/deps/v8/test/unittests/compiler/compiler-test-utils.h
@@ -14,41 +14,25 @@ namespace compiler {
// The TARGET_TEST(Case, Name) macro works just like
// TEST(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
-#if V8_TURBOFAN_TARGET
#define TARGET_TEST(Case, Name) TEST(Case, Name)
-#else
-#define TARGET_TEST(Case, Name) TEST(Case, DISABLED_##Name)
-#endif
// The TARGET_TEST_F(Case, Name) macro works just like
// TEST_F(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
-#if V8_TURBOFAN_TARGET
#define TARGET_TEST_F(Case, Name) TEST_F(Case, Name)
-#else
-#define TARGET_TEST_F(Case, Name) TEST_F(Case, DISABLED_##Name)
-#endif
// The TARGET_TEST_P(Case, Name) macro works just like
// TEST_P(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
-#if V8_TURBOFAN_TARGET
#define TARGET_TEST_P(Case, Name) TEST_P(Case, Name)
-#else
-#define TARGET_TEST_P(Case, Name) TEST_P(Case, DISABLED_##Name)
-#endif
// The TARGET_TYPED_TEST(Case, Name) macro works just like
// TYPED_TEST(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
-#if V8_TURBOFAN_TARGET
#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, Name)
-#else
-#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, DISABLED_##Name)
-#endif
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index acab91b009..1636b7ee5b 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -153,7 +153,7 @@ InstructionSelectorTest::StreamBuilder::GetFrameStateFunctionInfo(
int parameter_count, int local_count) {
return common()->CreateFrameStateFunctionInfo(
FrameStateType::kJavaScriptFunction, parameter_count, local_count,
- Handle<SharedFunctionInfo>());
+ Handle<SharedFunctionInfo>(), CALL_MAINTAINS_NATIVE_CONTEXT);
}
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
index 15d3b2005f..574864edf5 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
@@ -39,22 +39,22 @@ class InstructionSelectorTest : public TestWithContext,
StreamBuilder(InstructionSelectorTest* test, MachineType return_type)
: RawMachineAssembler(
test->isolate(), new (test->zone()) Graph(test->zone()),
- MakeMachineSignature(test->zone(), return_type), kMachPtr,
+ MakeCallDescriptor(test->zone(), return_type), kMachPtr,
MachineOperatorBuilder::kAllOptionalOps),
test_(test) {}
StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
MachineType parameter0_type)
: RawMachineAssembler(
test->isolate(), new (test->zone()) Graph(test->zone()),
- MakeMachineSignature(test->zone(), return_type, parameter0_type),
+ MakeCallDescriptor(test->zone(), return_type, parameter0_type),
kMachPtr, MachineOperatorBuilder::kAllOptionalOps),
test_(test) {}
StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
MachineType parameter0_type, MachineType parameter1_type)
: RawMachineAssembler(
test->isolate(), new (test->zone()) Graph(test->zone()),
- MakeMachineSignature(test->zone(), return_type, parameter0_type,
- parameter1_type),
+ MakeCallDescriptor(test->zone(), return_type, parameter0_type,
+ parameter1_type),
kMachPtr, MachineOperatorBuilder::kAllOptionalOps),
test_(test) {}
StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
@@ -62,8 +62,8 @@ class InstructionSelectorTest : public TestWithContext,
MachineType parameter2_type)
: RawMachineAssembler(
test->isolate(), new (test->zone()) Graph(test->zone()),
- MakeMachineSignature(test->zone(), return_type, parameter0_type,
- parameter1_type, parameter2_type),
+ MakeCallDescriptor(test->zone(), return_type, parameter0_type,
+ parameter1_type, parameter2_type),
kMachPtr, MachineOperatorBuilder::kAllOptionalOps),
test_(test) {}
@@ -85,41 +85,40 @@ class InstructionSelectorTest : public TestWithContext,
int local_count);
private:
- MachineSignature* MakeMachineSignature(Zone* zone,
- MachineType return_type) {
+ CallDescriptor* MakeCallDescriptor(Zone* zone, MachineType return_type) {
MachineSignature::Builder builder(zone, 1, 0);
builder.AddReturn(return_type);
- return builder.Build();
+ return Linkage::GetSimplifiedCDescriptor(zone, builder.Build());
}
- MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
- MachineType parameter0_type) {
+ CallDescriptor* MakeCallDescriptor(Zone* zone, MachineType return_type,
+ MachineType parameter0_type) {
MachineSignature::Builder builder(zone, 1, 1);
builder.AddReturn(return_type);
builder.AddParam(parameter0_type);
- return builder.Build();
+ return Linkage::GetSimplifiedCDescriptor(zone, builder.Build());
}
- MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
- MachineType parameter0_type,
- MachineType parameter1_type) {
+ CallDescriptor* MakeCallDescriptor(Zone* zone, MachineType return_type,
+ MachineType parameter0_type,
+ MachineType parameter1_type) {
MachineSignature::Builder builder(zone, 1, 2);
builder.AddReturn(return_type);
builder.AddParam(parameter0_type);
builder.AddParam(parameter1_type);
- return builder.Build();
+ return Linkage::GetSimplifiedCDescriptor(zone, builder.Build());
}
- MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
- MachineType parameter0_type,
- MachineType parameter1_type,
- MachineType parameter2_type) {
+ CallDescriptor* MakeCallDescriptor(Zone* zone, MachineType return_type,
+ MachineType parameter0_type,
+ MachineType parameter1_type,
+ MachineType parameter2_type) {
MachineSignature::Builder builder(zone, 1, 3);
builder.AddReturn(return_type);
builder.AddParam(parameter0_type);
builder.AddParam(parameter1_type);
builder.AddParam(parameter2_type);
- return builder.Build();
+ return Linkage::GetSimplifiedCDescriptor(zone, builder.Build());
}
private:
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
index 1ff441f746..65a7f299c5 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
@@ -93,9 +93,9 @@ void InstructionSequenceTest::EndLoop() {
}
-void InstructionSequenceTest::StartBlock() {
+void InstructionSequenceTest::StartBlock(bool deferred) {
block_returns_ = false;
- NewBlock();
+ NewBlock(deferred);
}
@@ -408,7 +408,7 @@ InstructionOperand InstructionSequenceTest::ConvertOutputOp(VReg vreg,
}
-InstructionBlock* InstructionSequenceTest::NewBlock() {
+InstructionBlock* InstructionSequenceTest::NewBlock(bool deferred) {
CHECK(current_block_ == nullptr);
Rpo rpo = Rpo::FromInt(static_cast<int>(instruction_blocks_.size()));
Rpo loop_header = Rpo::Invalid();
@@ -430,7 +430,7 @@ InstructionBlock* InstructionSequenceTest::NewBlock() {
}
// Construct instruction block.
auto instruction_block = new (zone())
- InstructionBlock(zone(), rpo, loop_header, loop_end, false, false);
+ InstructionBlock(zone(), rpo, loop_header, loop_end, deferred, false);
instruction_blocks_.push_back(instruction_block);
current_block_ = instruction_block;
sequence()->StartBlock(rpo);
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
index 2d75da7e47..54317ede21 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
@@ -126,7 +126,7 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
void StartLoop(int loop_blocks);
void EndLoop();
- void StartBlock();
+ void StartBlock(bool deferred = false);
Instruction* EndBlock(BlockCompletion completion = FallThrough());
TestOperand Imm(int32_t imm = 0);
@@ -203,7 +203,7 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
InstructionOperand* ConvertInputs(size_t input_size, TestOperand* inputs);
InstructionOperand ConvertInputOp(TestOperand op);
InstructionOperand ConvertOutputOp(VReg vreg, TestOperand op);
- InstructionBlock* NewBlock();
+ InstructionBlock* NewBlock(bool deferred = false);
void WireBlock(size_t block_offset, int jump_offset);
Instruction* Emit(InstructionCode code, size_t outputs_size = 0,
@@ -223,7 +223,7 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
typedef std::map<int, const Instruction*> Instructions;
typedef std::vector<BlockCompletion> Completions;
- SmartPointer<RegisterConfiguration> config_;
+ base::SmartPointer<RegisterConfiguration> config_;
InstructionSequence* sequence_;
int num_general_registers_;
int num_double_registers_;
diff --git a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc
new file mode 100644
index 0000000000..a869f7ebb1
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc
@@ -0,0 +1,262 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/interpreter-assembler-unittest.h"
+
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/unique.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+using ::testing::_;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+const interpreter::Bytecode kBytecodes[] = {
+#define DEFINE_BYTECODE(Name, ...) interpreter::Bytecode::k##Name,
+ BYTECODE_LIST(DEFINE_BYTECODE)
+#undef DEFINE_BYTECODE
+};
+
+
+Matcher<Node*> IsIntPtrAdd(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsInt64Add(lhs_matcher, rhs_matcher)
+ : IsInt32Add(lhs_matcher, rhs_matcher);
+}
+
+
+Matcher<Node*> IsIntPtrSub(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsInt64Sub(lhs_matcher, rhs_matcher)
+ : IsInt32Sub(lhs_matcher, rhs_matcher);
+}
+
+
+Matcher<Node*> IsWordShl(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsWord64Shl(lhs_matcher, rhs_matcher)
+ : IsWord32Shl(lhs_matcher, rhs_matcher);
+}
+
+
+Matcher<Node*> IsWordSar(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsWord64Sar(lhs_matcher, rhs_matcher)
+ : IsWord32Sar(lhs_matcher, rhs_matcher);
+}
+
+
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
+ const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
+ return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher,
+ graph()->start(), graph()->start());
+}
+
+
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
+ const Matcher<StoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher) {
+ return ::i::compiler::IsStore(rep_matcher, base_matcher, index_matcher,
+ value_matcher, graph()->start(),
+ graph()->start());
+}
+
+
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperand(
+ int operand) {
+ return IsLoad(
+ kMachUint8, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(1 + operand)));
+}
+
+
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
+ IsBytecodeOperandSignExtended(int operand) {
+ Matcher<Node*> load_matcher = IsLoad(
+ kMachInt8, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(1 + operand)));
+ if (kPointerSize == 8) {
+ load_matcher = IsChangeInt32ToInt64(load_matcher);
+ }
+ return load_matcher;
+}
+
+
+Graph*
+InterpreterAssemblerTest::InterpreterAssemblerForTest::GetCompletedGraph() {
+ End();
+ return graph();
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ m.Dispatch();
+ Graph* graph = m.GetCompletedGraph();
+
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* tail_call_node = end->InputAt(0);
+
+ Matcher<Node*> next_bytecode_offset_matcher =
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(interpreter::Bytecodes::Size(bytecode)));
+ Matcher<Node*> target_bytecode_matcher = m.IsLoad(
+ kMachUint8, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ next_bytecode_offset_matcher);
+ Matcher<Node*> code_target_matcher = m.IsLoad(
+ kMachPtr, IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsWord32Shl(target_bytecode_matcher,
+ IsInt32Constant(kPointerSizeLog2)));
+
+ EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
+ EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
+ EXPECT_THAT(
+ tail_call_node,
+ IsTailCall(m.call_descriptor(), code_target_matcher,
+ IsParameter(Linkage::kInterpreterAccumulatorParameter),
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ next_bytecode_offset_matcher,
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ graph->start(), graph->start()));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, Return) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ m.Return();
+ Graph* graph = m.GetCompletedGraph();
+
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* tail_call_node = end->InputAt(0);
+
+ EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
+ EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
+ Matcher<Unique<HeapObject>> exit_trampoline(
+ Unique<HeapObject>::CreateImmovable(
+ isolate()->builtins()->InterpreterExitTrampoline()));
+ EXPECT_THAT(
+ tail_call_node,
+ IsTailCall(m.call_descriptor(), IsHeapConstant(exit_trampoline),
+ IsParameter(Linkage::kInterpreterAccumulatorParameter),
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ graph->start(), graph->start()));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ int number_of_operands = interpreter::Bytecodes::NumberOfOperands(bytecode);
+ for (int i = 0; i < number_of_operands; i++) {
+ switch (interpreter::Bytecodes::GetOperandType(bytecode, i)) {
+ case interpreter::OperandType::kImm8:
+ EXPECT_THAT(m.BytecodeOperandImm8(i),
+ m.IsBytecodeOperandSignExtended(i));
+ break;
+ case interpreter::OperandType::kReg:
+ EXPECT_THAT(m.BytecodeOperandReg(i),
+ m.IsBytecodeOperandSignExtended(i));
+ break;
+ case interpreter::OperandType::kNone:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ // Should be incoming accumulator if not set.
+ EXPECT_THAT(m.GetAccumulator(),
+ IsParameter(Linkage::kInterpreterAccumulatorParameter));
+
+    // Should be set by SetAccumulator.
+ Node* accumulator_value_1 = m.Int32Constant(0xdeadbeef);
+ m.SetAccumulator(accumulator_value_1);
+ EXPECT_THAT(m.GetAccumulator(), accumulator_value_1);
+ Node* accumulator_value_2 = m.Int32Constant(42);
+ m.SetAccumulator(accumulator_value_2);
+ EXPECT_THAT(m.GetAccumulator(), accumulator_value_2);
+
+ // Should be passed to next bytecode handler on dispatch.
+ m.Dispatch();
+ Graph* graph = m.GetCompletedGraph();
+
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* tail_call_node = end->InputAt(0);
+
+ EXPECT_THAT(tail_call_node,
+ IsTailCall(m.call_descriptor(), _, accumulator_value_2, _, _, _,
+ _, graph->start(), graph->start()));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* reg_index_node = m.Int32Constant(44);
+ Node* load_reg_node = m.LoadRegister(reg_index_node);
+ EXPECT_THAT(
+ load_reg_node,
+ m.IsLoad(kMachPtr,
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* store_value = m.Int32Constant(0xdeadbeef);
+ Node* reg_index_node = m.Int32Constant(44);
+ Node* store_reg_node = m.StoreRegister(store_value, reg_index_node);
+ EXPECT_THAT(
+ store_reg_node,
+ m.IsStore(StoreRepresentation(kMachPtr, kNoWriteBarrier),
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2)),
+ store_value));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* value = m.Int32Constant(44);
+ EXPECT_THAT(m.SmiTag(value),
+ IsWordShl(value, IsInt32Constant(kSmiShiftSize + kSmiTagSize)));
+ EXPECT_THAT(m.SmiUntag(value),
+ IsWordSar(value, IsInt32Constant(kSmiShiftSize + kSmiTagSize)));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h b/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h
new file mode 100644
index 0000000000..64353ae128
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h
@@ -0,0 +1,56 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
+#define V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
+
+#include "src/compiler/interpreter-assembler.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+using ::testing::Matcher;
+
+class InterpreterAssemblerTest : public TestWithIsolateAndZone {
+ public:
+ InterpreterAssemblerTest() {}
+ ~InterpreterAssemblerTest() override {}
+
+ class InterpreterAssemblerForTest final : public InterpreterAssembler {
+ public:
+ InterpreterAssemblerForTest(InterpreterAssemblerTest* test,
+ interpreter::Bytecode bytecode)
+ : InterpreterAssembler(test->isolate(), test->zone(), bytecode) {}
+ ~InterpreterAssemblerForTest() override {}
+
+ Graph* GetCompletedGraph();
+
+ Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher);
+ Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher);
+ Matcher<Node*> IsBytecodeOperand(int operand);
+ Matcher<Node*> IsBytecodeOperandSignExtended(int operand);
+
+ using InterpreterAssembler::call_descriptor;
+ using InterpreterAssembler::graph;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InterpreterAssemblerForTest);
+ };
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
diff --git a/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc b/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc
new file mode 100644
index 0000000000..b52417de2f
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc
@@ -0,0 +1,306 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-context-relaxation.h"
+#include "src/compiler/js-graph.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSContextRelaxationTest : public GraphTest {
+ public:
+ JSContextRelaxationTest() : GraphTest(3), javascript_(zone()) {}
+ ~JSContextRelaxationTest() override {}
+
+ protected:
+ Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kNoFlags) {
+ MachineOperatorBuilder machine(zone(), kMachPtr, flags);
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), &machine);
+ // TODO(titzer): mock the GraphReducer here for better unit testing.
+ GraphReducer graph_reducer(zone(), graph());
+ JSContextRelaxation reducer;
+ return reducer.Reduce(node);
+ }
+
+ Node* EmptyFrameState() {
+ MachineOperatorBuilder machine(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), &machine);
+ return jsgraph.EmptyFrameState();
+ }
+
+ Node* ShallowFrameStateChain(Node* outer_context,
+ ContextCallingMode context_calling_mode) {
+ const FrameStateFunctionInfo* const frame_state_function_info =
+ common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kJavaScriptFunction, 3, 0,
+ Handle<SharedFunctionInfo>(), context_calling_mode);
+ const Operator* op = common()->FrameState(BailoutId::None(),
+ OutputFrameStateCombine::Ignore(),
+ frame_state_function_info);
+ return graph()->NewNode(op, graph()->start(), graph()->start(),
+ graph()->start(), outer_context, graph()->start(),
+ graph()->start());
+ }
+
+ Node* DeepFrameStateChain(Node* outer_context,
+ ContextCallingMode context_calling_mode) {
+ const FrameStateFunctionInfo* const frame_state_function_info =
+ common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kJavaScriptFunction, 3, 0,
+ Handle<SharedFunctionInfo>(), context_calling_mode);
+ const Operator* op = common()->FrameState(BailoutId::None(),
+ OutputFrameStateCombine::Ignore(),
+ frame_state_function_info);
+ Node* shallow_frame_state =
+ ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ return graph()->NewNode(op, graph()->start(), graph()->start(),
+ graph()->start(), graph()->start(),
+ graph()->start(), shallow_frame_state);
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+ JSOperatorBuilder javascript_;
+};
+
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionShallowFrameStateChainNoCrossCtx) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ Node* const frame_state =
+ ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* node =
+ graph()->NewNode(javascript()->CallFunction(2, NO_CALL_FUNCTION_FLAGS,
+ STRICT, VectorSlotPair()),
+ input0, input1, context, frame_state, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
+}
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionShallowFrameStateChainCrossCtx) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ Node* const frame_state =
+ ShallowFrameStateChain(outer_context, CALL_CHANGES_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* node =
+ graph()->NewNode(javascript()->CallFunction(2, NO_CALL_FUNCTION_FLAGS,
+ STRICT, VectorSlotPair()),
+ input0, input1, context, frame_state, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_FALSE(r.Changed());
+ EXPECT_EQ(context, NodeProperties::GetContextInput(node));
+}
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepFrameStateChainNoCrossCtx) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ Node* const frame_state =
+ DeepFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* node =
+ graph()->NewNode(javascript()->CallFunction(2, NO_CALL_FUNCTION_FLAGS,
+ STRICT, VectorSlotPair()),
+ input0, input1, context, frame_state, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
+}
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepFrameStateChainCrossCtx) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ Node* const frame_state =
+ DeepFrameStateChain(outer_context, CALL_CHANGES_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* node =
+ graph()->NewNode(javascript()->CallFunction(2, NO_CALL_FUNCTION_FLAGS,
+ STRICT, VectorSlotPair()),
+ input0, input1, context, frame_state, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_FALSE(r.Changed());
+ EXPECT_EQ(context, NodeProperties::GetContextInput(node));
+}
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepContextChainFullRelaxForCatch) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ const Operator* op = javascript()->CreateCatchContext(Unique<String>());
+ Node* const frame_state_1 =
+ ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* nested_context =
+ graph()->NewNode(op, graph()->start(), graph()->start(), outer_context,
+ frame_state_1, effect, control);
+ Node* const frame_state_2 =
+ ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* node =
+ graph()->NewNode(javascript()->CallFunction(2, NO_CALL_FUNCTION_FLAGS,
+ STRICT, VectorSlotPair()),
+ input0, input1, context, frame_state_2, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
+}
+
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepContextChainFullRelaxForWith) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ const Operator* op = javascript()->CreateWithContext();
+ Node* const frame_state_1 =
+ ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* nested_context =
+ graph()->NewNode(op, graph()->start(), graph()->start(), outer_context,
+ frame_state_1, effect, control);
+ Node* const frame_state_2 =
+ ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* node =
+ graph()->NewNode(javascript()->CallFunction(2, NO_CALL_FUNCTION_FLAGS,
+ STRICT, VectorSlotPair()),
+ input0, input1, context, frame_state_2, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
+}
+
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepContextChainFullRelaxForBlock) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ const Operator* op = javascript()->CreateBlockContext();
+ Node* const frame_state_1 =
+ ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* nested_context =
+ graph()->NewNode(op, graph()->start(), graph()->start(), outer_context,
+ frame_state_1, effect, control);
+ Node* const frame_state_2 =
+ ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* node =
+ graph()->NewNode(javascript()->CallFunction(2, NO_CALL_FUNCTION_FLAGS,
+ STRICT, VectorSlotPair()),
+ input0, input1, context, frame_state_2, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
+}
+
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepContextChainPartialRelaxForScript) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ const Operator* op = javascript()->CreateScriptContext();
+ Node* const frame_state_1 =
+ ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* nested_context =
+ graph()->NewNode(op, graph()->start(), graph()->start(), outer_context,
+ frame_state_1, effect, control);
+ Node* const frame_state_2 =
+ ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* node =
+ graph()->NewNode(javascript()->CallFunction(2, NO_CALL_FUNCTION_FLAGS,
+ STRICT, VectorSlotPair()),
+ input0, input1, context, frame_state_2, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(nested_context, NodeProperties::GetContextInput(node));
+}
+
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepContextChainPartialRelaxForModule) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ const Operator* op = javascript()->CreateModuleContext();
+ Node* const frame_state_1 =
+ ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* nested_context =
+ graph()->NewNode(op, graph()->start(), graph()->start(), outer_context,
+ frame_state_1, effect, control);
+ Node* const frame_state_2 =
+ ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* node =
+ graph()->NewNode(javascript()->CallFunction(2, NO_CALL_FUNCTION_FLAGS,
+ STRICT, VectorSlotPair()),
+ input0, input1, context, frame_state_2, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(nested_context, NodeProperties::GetContextInput(node));
+}
+
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepContextChainPartialNoRelax) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ const Operator* op = javascript()->CreateFunctionContext();
+ Node* const frame_state_1 =
+ ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* nested_context =
+ graph()->NewNode(op, graph()->start(), graph()->start(), outer_context,
+ frame_state_1, effect, control);
+ Node* const frame_state_2 =
+ ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* node =
+ graph()->NewNode(javascript()->CallFunction(2, NO_CALL_FUNCTION_FLAGS,
+ STRICT, VectorSlotPair()),
+ input0, input1, context, frame_state_2, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_FALSE(r.Changed());
+ EXPECT_EQ(context, NodeProperties::GetContextInput(node));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc b/deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc
index d52242ec7d..251293ddcf 100644
--- a/deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc
@@ -85,7 +85,7 @@ class JSTypeFeedbackTest : public TypedGraphTest {
Unique<Name> name = Unique<Name>::CreateUninitialized(
isolate()->factory()->InternalizeUtf8String(string));
const Operator* op = javascript()->LoadGlobal(name, feedback);
- Node* load = graph()->NewNode(op, global, vector, context);
+ Node* load = graph()->NewNode(op, context, global, vector, context);
if (mode == JSTypeFeedbackSpecializer::kDeoptimizationEnabled) {
for (int i = 0; i < OperatorProperties::GetFrameStateInputCount(op);
i++) {
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index a12d79f02b..9d6cca3dbc 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -874,7 +874,11 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithSafeKey) {
}
-TEST_F(JSTypedLoweringTest, JSLoadNamedGlobalConstants) {
+// -----------------------------------------------------------------------------
+// JSLoadGlobal
+
+
+TEST_F(JSTypedLoweringTest, JSLoadGlobalConstants) {
Handle<String> names[] = {
Handle<String>(isolate()->heap()->undefined_string(), isolate()),
Handle<String>(isolate()->heap()->infinity_string(), isolate()),
@@ -897,8 +901,8 @@ TEST_F(JSTypedLoweringTest, JSLoadNamedGlobalConstants) {
for (size_t i = 0; i < arraysize(names); i++) {
Unique<Name> name = Unique<Name>::CreateImmovable(names[i]);
Reduction r = Reduce(graph()->NewNode(
- javascript()->LoadGlobal(name, feedback), global, vector, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ javascript()->LoadGlobal(name, feedback), context, global, vector,
+ context, EmptyFrameState(), EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), matches[i]);
@@ -907,6 +911,31 @@ TEST_F(JSTypedLoweringTest, JSLoadNamedGlobalConstants) {
// -----------------------------------------------------------------------------
+// JSLoadNamed
+
+
+TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
+ VectorSlotPair feedback;
+ Unique<Name> name = Unique<Name>::CreateImmovable(factory()->length_string());
+ Node* const receiver = Parameter(Type::String(), 0);
+ Node* const vector = Parameter(Type::Internal(), 1);
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->LoadNamed(name, feedback, language_mode),
+ receiver, vector, context, EmptyFrameState(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsLoadField(AccessBuilder::ForStringLength(zone()), receiver,
+ effect, control));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
// JSLoadDynamicGlobal
@@ -921,7 +950,8 @@ TEST_F(JSTypedLoweringTest, JSLoadDynamicGlobal) {
for (int i = 0; i < DynamicGlobalAccess::kMaxCheckDepth; ++i) {
uint32_t bitset = 1 << i; // Only single check.
Reduction r = Reduce(graph()->NewNode(
- javascript()->LoadDynamicGlobal(name, bitset, feedback, NOT_CONTEXTUAL),
+ javascript()->LoadDynamicGlobal(name, bitset, feedback,
+ NOT_INSIDE_TYPEOF),
vector, context, context, frame_state, frame_state, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -974,7 +1004,6 @@ TEST_F(JSTypedLoweringTest, JSLoadDynamicContext) {
}
}
-#if V8_TURBOFAN_TARGET
// -----------------------------------------------------------------------------
// JSAdd
@@ -1074,8 +1103,6 @@ TEST_F(JSTypedLoweringTest, JSCreateLiteralObject) {
input0, input1, input2, _, context, frame_state, effect, control));
}
-#endif // V8_TURBOFAN_TARGET
-
// -----------------------------------------------------------------------------
// JSCreateWithContext
diff --git a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
new file mode 100644
index 0000000000..5d24a3bd1d
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
@@ -0,0 +1,324 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+MachineType kMachineTypes[] = {kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+ kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+ kMachAnyTagged, kMachAnyTagged};
+}
+
+class LinkageTailCall : public TestWithZone {
+ protected:
+ CallDescriptor* NewStandardCallDescriptor(LocationSignature* locations) {
+ DCHECK(arraysize(kMachineTypes) >=
+ locations->return_count() + locations->parameter_count());
+ MachineSignature* types = new (zone()) MachineSignature(
+ locations->return_count(), locations->parameter_count(), kMachineTypes);
+ return new (zone())
+ CallDescriptor(CallDescriptor::kCallCodeObject, kMachAnyTagged,
+ LinkageLocation::ForAnyRegister(),
+ types, // machine_sig
+ locations, // location_sig
+ 0, // js_parameter_count
+ Operator::kNoProperties, // properties
+ 0, // callee-saved
+ 0, // callee-saved fp
+ CallDescriptor::kNoFlags, // flags,
+ "");
+ }
+
+ LinkageLocation StackLocation(int loc) {
+ return LinkageLocation::ForCallerFrameSlot(-loc);
+ }
+
+ LinkageLocation RegisterLocation(int loc) {
+ return LinkageLocation::ForRegister(loc);
+ }
+};
+
+
+TEST_F(LinkageTailCall, EmptyToEmpty) {
+ LocationSignature locations(0, 0, nullptr);
+ CallDescriptor* desc = NewStandardCallDescriptor(&locations);
+ CommonOperatorBuilder common(zone());
+ const Operator* op = common.Call(desc);
+ Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+ EXPECT_TRUE(desc->CanTailCall(node));
+}
+
+
+TEST_F(LinkageTailCall, SameReturn) {
+ // Caller
+ LinkageLocation location_array[] = {RegisterLocation(0)};
+ LocationSignature locations1(1, 0, location_array);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Callee
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations1);
+
+ CommonOperatorBuilder common(zone());
+ const Operator* op = common.Call(desc2);
+ Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+}
+
+
+TEST_F(LinkageTailCall, DifferingReturn) {
+ // Caller
+ LinkageLocation location_array1[] = {RegisterLocation(0)};
+ LocationSignature locations1(1, 0, location_array1);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Callee
+ LinkageLocation location_array2[] = {RegisterLocation(1)};
+ LocationSignature locations2(1, 0, location_array2);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+ CommonOperatorBuilder common(zone());
+ const Operator* op = common.Call(desc2);
+ Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+ EXPECT_FALSE(desc1->CanTailCall(node));
+}
+
+
+TEST_F(LinkageTailCall, MoreRegisterParametersCallee) {
+ // Caller
+ LinkageLocation location_array1[] = {RegisterLocation(0)};
+ LocationSignature locations1(1, 0, location_array1);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Callee
+ LinkageLocation location_array2[] = {RegisterLocation(0),
+ RegisterLocation(0)};
+ LocationSignature locations2(1, 1, location_array2);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+ CommonOperatorBuilder common(zone());
+ const Operator* op = common.Call(desc2);
+ Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+}
+
+
+TEST_F(LinkageTailCall, MoreRegisterParametersCaller) {
+ // Caller
+ LinkageLocation location_array1[] = {RegisterLocation(0),
+ RegisterLocation(0)};
+ LocationSignature locations1(1, 1, location_array1);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Callee
+ LinkageLocation location_array2[] = {RegisterLocation(0)};
+ LocationSignature locations2(1, 0, location_array2);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+ CommonOperatorBuilder common(zone());
+ const Operator* op = common.Call(desc2);
+ Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+}
+
+
+TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCallee) {
+ // Caller
+ LinkageLocation location_array1[] = {RegisterLocation(0)};
+ LocationSignature locations1(1, 0, location_array1);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Callee
+ LinkageLocation location_array2[] = {RegisterLocation(0), RegisterLocation(0),
+ RegisterLocation(1), StackLocation(1)};
+ LocationSignature locations2(1, 3, location_array2);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+ CommonOperatorBuilder common(zone());
+ const Operator* op = common.Call(desc2);
+ Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+ EXPECT_FALSE(desc1->CanTailCall(node));
+}
+
+
+TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCaller) {
+ // Caller
+ LinkageLocation location_array[] = {RegisterLocation(0), RegisterLocation(0),
+ RegisterLocation(1), StackLocation(1)};
+ LocationSignature locations1(1, 3, location_array);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Callee
+ LinkageLocation location_array2[] = {RegisterLocation(0)};
+ LocationSignature locations2(1, 0, location_array2);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+ CommonOperatorBuilder common(zone());
+ const Operator* op = common.Call(desc2);
+ Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+ EXPECT_FALSE(desc1->CanTailCall(node));
+}
+
+
+TEST_F(LinkageTailCall, MatchingStackParameters) {
+ // Caller
+ LinkageLocation location_array[] = {RegisterLocation(0), StackLocation(3),
+ StackLocation(2), StackLocation(1)};
+ LocationSignature locations1(1, 3, location_array);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Caller
+ LocationSignature locations2(1, 3, location_array);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations1);
+
+ CommonOperatorBuilder common(zone());
+ Node* p0 = Node::New(zone(), 0, nullptr, 0, nullptr, false);
+ Node* p1 = Node::New(zone(), 0, common.Parameter(0), 0, nullptr, false);
+ Node* p2 = Node::New(zone(), 0, common.Parameter(1), 0, nullptr, false);
+ Node* p3 = Node::New(zone(), 0, common.Parameter(2), 0, nullptr, false);
+ Node* parameters[] = {p0, p1, p2, p3};
+ const Operator* op = common.Call(desc2);
+ Node* const node =
+ Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+}
+
+
+TEST_F(LinkageTailCall, NonMatchingStackParameters) {
+ // Caller
+ LinkageLocation location_array[] = {RegisterLocation(0), StackLocation(3),
+ StackLocation(2), StackLocation(1)};
+ LocationSignature locations1(1, 3, location_array);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Caller
+ LocationSignature locations2(1, 3, location_array);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations1);
+
+ CommonOperatorBuilder common(zone());
+ Node* p0 = Node::New(zone(), 0, nullptr, 0, nullptr, false);
+ Node* p1 = Node::New(zone(), 0, common.Parameter(0), 0, nullptr, false);
+ Node* p2 = Node::New(zone(), 0, common.Parameter(2), 0, nullptr, false);
+ Node* p3 = Node::New(zone(), 0, common.Parameter(1), 0, nullptr, false);
+ Node* parameters[] = {p0, p1, p2, p3};
+ const Operator* op = common.Call(desc2);
+ Node* const node =
+ Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
+ EXPECT_FALSE(desc1->CanTailCall(node));
+}
+
+
+TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegisters) {
+ // Caller
+ LinkageLocation location_array[] = {RegisterLocation(0), StackLocation(3),
+ StackLocation(2), StackLocation(1),
+ RegisterLocation(0), RegisterLocation(1)};
+ LocationSignature locations1(1, 5, location_array);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Caller
+ LocationSignature locations2(1, 3, location_array);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations1);
+
+ CommonOperatorBuilder common(zone());
+ Node* p0 = Node::New(zone(), 0, nullptr, 0, nullptr, false);
+ Node* p1 = Node::New(zone(), 0, common.Parameter(0), 0, nullptr, false);
+ Node* p2 = Node::New(zone(), 0, common.Parameter(1), 0, nullptr, false);
+ Node* p3 = Node::New(zone(), 0, common.Parameter(2), 0, nullptr, false);
+ Node* parameters[] = {p0, p1, p2, p3};
+ const Operator* op = common.Call(desc2);
+ Node* const node =
+ Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+}
+
+
+TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegisters) {
+ // Caller
+ LinkageLocation location_array[] = {RegisterLocation(0), StackLocation(3),
+ StackLocation(2), StackLocation(1),
+ RegisterLocation(0), RegisterLocation(1)};
+ LocationSignature locations1(1, 3, location_array);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Caller
+ LocationSignature locations2(1, 5, location_array);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations1);
+
+ CommonOperatorBuilder common(zone());
+ Node* p0 = Node::New(zone(), 0, nullptr, 0, nullptr, false);
+ Node* p1 = Node::New(zone(), 0, common.Parameter(0), 0, nullptr, false);
+ Node* p2 = Node::New(zone(), 0, common.Parameter(1), 0, nullptr, false);
+ Node* p3 = Node::New(zone(), 0, common.Parameter(2), 0, nullptr, false);
+ Node* p4 = Node::New(zone(), 0, common.Parameter(3), 0, nullptr, false);
+ Node* parameters[] = {p0, p1, p2, p3, p4};
+ const Operator* op = common.Call(desc2);
+ Node* const node =
+ Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+}
+
+
+TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegistersAndStack) {
+ // Caller
+ LinkageLocation location_array[] = {RegisterLocation(0), StackLocation(3),
+ StackLocation(2), StackLocation(1),
+ RegisterLocation(0), StackLocation(4)};
+ LocationSignature locations1(1, 5, location_array);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Caller
+ LocationSignature locations2(1, 3, location_array);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+ CommonOperatorBuilder common(zone());
+ Node* p0 = Node::New(zone(), 0, nullptr, 0, nullptr, false);
+ Node* p1 = Node::New(zone(), 0, common.Parameter(0), 0, nullptr, false);
+ Node* p2 = Node::New(zone(), 0, common.Parameter(1), 0, nullptr, false);
+ Node* p3 = Node::New(zone(), 0, common.Parameter(2), 0, nullptr, false);
+ Node* p4 = Node::New(zone(), 0, common.Parameter(3), 0, nullptr, false);
+ Node* parameters[] = {p0, p1, p2, p3, p4};
+ const Operator* op = common.Call(desc2);
+ Node* const node =
+ Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
+ EXPECT_FALSE(desc1->CanTailCall(node));
+}
+
+
+TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegistersAndStack) {
+ // Caller
+ LinkageLocation location_array[] = {RegisterLocation(0), StackLocation(3),
+ StackLocation(2), RegisterLocation(0),
+ RegisterLocation(1), StackLocation(4)};
+ LocationSignature locations1(1, 3, location_array);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Caller
+ LocationSignature locations2(1, 5, location_array);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+ CommonOperatorBuilder common(zone());
+ Node* p0 = Node::New(zone(), 0, nullptr, 0, nullptr, false);
+ Node* p1 = Node::New(zone(), 0, common.Parameter(0), 0, nullptr, false);
+ Node* p2 = Node::New(zone(), 0, common.Parameter(1), 0, nullptr, false);
+ Node* p3 = Node::New(zone(), 0, common.Parameter(2), 0, nullptr, false);
+ Node* p4 = Node::New(zone(), 0, common.Parameter(3), 0, nullptr, false);
+ Node* parameters[] = {p0, p1, p2, p3, p4};
+ const Operator* op = common.Call(desc2);
+ Node* const node =
+ Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
+ EXPECT_FALSE(desc1->CanTailCall(node));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
index 1e142550d5..3c94c25887 100644
--- a/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
@@ -60,7 +60,7 @@ class LivenessAnalysisTest : public GraphTest {
const FrameStateFunctionInfo* state_info =
common()->CreateFrameStateFunctionInfo(
FrameStateType::kJavaScriptFunction, 0, locals_count_,
- Handle<SharedFunctionInfo>());
+ Handle<SharedFunctionInfo>(), CALL_MAINTAINS_NATIVE_CONTEXT);
const Operator* op = common()->FrameState(
BailoutId(ast_num), OutputFrameStateCombine::Ignore(), state_info);
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index ce11fdef81..b14e9d392d 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -1438,6 +1438,55 @@ TEST_F(MachineOperatorReducerTest, Float64InsertHighWord32WithConstant) {
// -----------------------------------------------------------------------------
+// Float64Equal
+
+
+TEST_F(MachineOperatorReducerTest, Float64EqualWithFloat32Conversions) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Float64Equal(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p1)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Equal(p0, p1));
+}
+
+
+// -----------------------------------------------------------------------------
+// Float64LessThan
+
+
+TEST_F(MachineOperatorReducerTest, Float64LessThanWithFloat32Conversions) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Float64LessThan(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p1)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32LessThan(p0, p1));
+}
+
+
+// -----------------------------------------------------------------------------
+// Float64LessThanOrEqual
+
+
+TEST_F(MachineOperatorReducerTest,
+ Float64LessThanOrEqualWithFloat32Conversions) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Float64LessThanOrEqual(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p1)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32LessThanOrEqual(p0, p1));
+}
+
+
+// -----------------------------------------------------------------------------
// Store
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 520ce0159e..d097ee4b66 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -7,6 +7,7 @@
#include <vector>
#include "src/assembler.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
@@ -1375,6 +1376,27 @@ class IsUnopMatcher final : public NodeMatcher {
const Matcher<Node*> input_matcher_;
};
+class IsParameterMatcher final : public NodeMatcher {
+ public:
+ explicit IsParameterMatcher(const Matcher<int>& index_matcher)
+ : NodeMatcher(IrOpcode::kParameter), index_matcher_(index_matcher) {}
+
+ void DescribeTo(std::ostream* os) const override {
+ *os << "is a Parameter node with index(";
+ index_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(ParameterIndexOf(node->op()), "index",
+ index_matcher_, listener));
+ }
+
+ private:
+ const Matcher<int> index_matcher_;
+};
+
} // namespace
@@ -1678,6 +1700,72 @@ Matcher<Node*> IsTailCall(
}
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ value_matchers.push_back(value4_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ value_matchers.push_back(value4_matcher);
+ value_matchers.push_back(value5_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
@@ -1799,6 +1887,16 @@ Matcher<Node*> IsLoadContext(const Matcher<ContextAccess>& access_matcher,
}
+Matcher<Node*> IsParameter(const Matcher<int> index_matcher) {
+ return MakeMatcher(new IsParameterMatcher(index_matcher));
+}
+
+
+Matcher<Node*> IsLoadFramePointer() {
+ return MakeMatcher(new NodeMatcher(IrOpcode::kLoadFramePointer));
+}
+
+
#define IS_BINOP_MATCHER(Name) \
Matcher<Node*> Is##Name(const Matcher<Node*>& lhs_matcher, \
const Matcher<Node*>& rhs_matcher) { \
@@ -1830,8 +1928,13 @@ IS_BINOP_MATCHER(Int32MulHigh)
IS_BINOP_MATCHER(Int32LessThan)
IS_BINOP_MATCHER(Uint32LessThan)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
+IS_BINOP_MATCHER(Int64Add)
+IS_BINOP_MATCHER(Int64Sub)
IS_BINOP_MATCHER(Float32Max)
IS_BINOP_MATCHER(Float32Min)
+IS_BINOP_MATCHER(Float32Equal)
+IS_BINOP_MATCHER(Float32LessThan)
+IS_BINOP_MATCHER(Float32LessThanOrEqual)
IS_BINOP_MATCHER(Float64Max)
IS_BINOP_MATCHER(Float64Min)
IS_BINOP_MATCHER(Float64Sub)
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index a64d9f009a..149dcfc439 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -134,6 +134,31 @@ Matcher<Node*> IsTailCall(
const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+
Matcher<Node*> IsBooleanNot(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
@@ -240,6 +265,10 @@ Matcher<Node*> IsUint32LessThan(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsUint32LessThanOrEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt64Add(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt64Sub(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeFloat64ToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);
@@ -254,6 +283,12 @@ Matcher<Node*> IsFloat32Max(const Matcher<Node*>& lhs_matcher,
Matcher<Node*> IsFloat32Min(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat32Abs(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat32Equal(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat32LessThan(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat32LessThanOrEqual(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Max(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Min(const Matcher<Node*>& lhs_matcher,
@@ -279,6 +314,8 @@ Matcher<Node*> IsLoadContext(const Matcher<ContextAccess>& access_matcher,
const Matcher<Node*>& context_matcher);
Matcher<Node*> IsNumberToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsNumberToUint32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsParameter(const Matcher<int> index_matcher);
+Matcher<Node*> IsLoadFramePointer();
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/unittests/compiler/ppc/OWNERS b/deps/v8/test/unittests/compiler/ppc/OWNERS
index a04d29a94f..eb007cb908 100644
--- a/deps/v8/test/unittests/compiler/ppc/OWNERS
+++ b/deps/v8/test/unittests/compiler/ppc/OWNERS
@@ -1,3 +1,4 @@
+jyan@ca.ibm.com
dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
diff --git a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
index 873b4ecd2a..23a118b6ad 100644
--- a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
@@ -9,6 +9,75 @@ namespace v8 {
namespace internal {
namespace compiler {
+
+namespace {
+
+// We can't just use the size of the moves collection, because of
+// redundant moves which need to be discounted.
+int GetMoveCount(const ParallelMove& moves) {
+ int move_count = 0;
+ for (auto move : moves) {
+ if (move->IsEliminated() || move->IsRedundant()) continue;
+ ++move_count;
+ }
+ return move_count;
+}
+
+
+bool AreOperandsOfSameType(
+ const AllocatedOperand& op,
+ const InstructionSequenceTest::TestOperand& test_op) {
+ bool test_op_is_reg =
+ (test_op.type_ ==
+ InstructionSequenceTest::TestOperandType::kFixedRegister ||
+ test_op.type_ == InstructionSequenceTest::TestOperandType::kRegister);
+
+ return (op.IsRegister() && test_op_is_reg) ||
+ (op.IsStackSlot() && !test_op_is_reg);
+}
+
+
+bool AllocatedOperandMatches(
+ const AllocatedOperand& op,
+ const InstructionSequenceTest::TestOperand& test_op) {
+ return AreOperandsOfSameType(op, test_op) &&
+ (op.index() == test_op.value_ ||
+ test_op.value_ == InstructionSequenceTest::kNoValue);
+}
+
+
+int GetParallelMoveCount(int instr_index, Instruction::GapPosition gap_pos,
+ const InstructionSequence* sequence) {
+ const ParallelMove* moves =
+ sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
+ if (moves == nullptr) return 0;
+ return GetMoveCount(*moves);
+}
+
+
+bool IsParallelMovePresent(int instr_index, Instruction::GapPosition gap_pos,
+ const InstructionSequence* sequence,
+ const InstructionSequenceTest::TestOperand& src,
+ const InstructionSequenceTest::TestOperand& dest) {
+ const ParallelMove* moves =
+ sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
+ EXPECT_NE(nullptr, moves);
+
+ bool found_match = false;
+ for (auto move : *moves) {
+ if (move->IsEliminated() || move->IsRedundant()) continue;
+ if (AllocatedOperandMatches(AllocatedOperand::cast(move->source()), src) &&
+ AllocatedOperandMatches(AllocatedOperand::cast(move->destination()),
+ dest)) {
+ found_match = true;
+ break;
+ }
+ }
+ return found_match;
+}
+}
+
+
class RegisterAllocatorTest : public InstructionSequenceTest {
public:
void Allocate() {
@@ -492,6 +561,144 @@ TEST_F(RegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
}
+TEST_F(RegisterAllocatorTest, DiamondWithCallFirstBlock) {
+ StartBlock();
+ auto x = EmitOI(Reg(0));
+ EndBlock(Branch(Reg(x), 1, 2));
+
+ StartBlock();
+ EmitCall(Slot(-1));
+ auto occupy = EmitOI(Reg(0));
+ EndBlock(Jump(2));
+
+ StartBlock();
+ EndBlock(FallThrough());
+
+ StartBlock();
+ Use(occupy);
+ Return(Reg(x));
+ EndBlock();
+ Allocate();
+}
+
+
+TEST_F(RegisterAllocatorTest, DiamondWithCallSecondBlock) {
+ StartBlock();
+ auto x = EmitOI(Reg(0));
+ EndBlock(Branch(Reg(x), 1, 2));
+
+ StartBlock();
+ EndBlock(Jump(2));
+
+ StartBlock();
+ EmitCall(Slot(-1));
+ auto occupy = EmitOI(Reg(0));
+ EndBlock(FallThrough());
+
+ StartBlock();
+ Use(occupy);
+ Return(Reg(x));
+ EndBlock();
+ Allocate();
+}
+
+
+TEST_F(RegisterAllocatorTest, SingleDeferredBlockSpill) {
+ StartBlock(); // B0
+ auto var = EmitOI(Reg(0));
+ EndBlock(Branch(Reg(var), 1, 2));
+
+ StartBlock(); // B1
+ EndBlock(Jump(2));
+
+ StartBlock(true); // B2
+ EmitCall(Slot(-1), Slot(var));
+ EndBlock();
+
+ StartBlock(); // B3
+ EmitNop();
+ EndBlock();
+
+ StartBlock(); // B4
+ Return(Reg(var, 0));
+ EndBlock();
+
+ Allocate();
+
+ const int var_def_index = 1;
+ const int call_index = 3;
+ int expect_no_moves =
+ FLAG_turbo_preprocess_ranges ? var_def_index : call_index;
+ int expect_spill_move =
+ FLAG_turbo_preprocess_ranges ? call_index : var_def_index;
+
+ // We should have no parallel moves at the "expect_no_moves" position.
+ EXPECT_EQ(
+ 0, GetParallelMoveCount(expect_no_moves, Instruction::START, sequence()));
+
+ // The spill should be performed at the position expect_spill_move.
+ EXPECT_TRUE(IsParallelMovePresent(expect_spill_move, Instruction::START,
+ sequence(), Reg(0), Slot(0)));
+}
+
+
+TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
+ if (!FLAG_turbo_preprocess_ranges) return;
+
+ StartBlock(); // B0
+ auto var1 = EmitOI(Reg(0));
+ auto var2 = EmitOI(Reg(1));
+ auto var3 = EmitOI(Reg(2));
+ EndBlock(Branch(Reg(var1, 0), 1, 2));
+
+ StartBlock(true); // B1
+ EmitCall(Slot(-2), Slot(var1));
+ EndBlock(Jump(2));
+
+ StartBlock(true); // B2
+ EmitCall(Slot(-1), Slot(var2));
+ EndBlock();
+
+ StartBlock(); // B3
+ EmitNop();
+ EndBlock();
+
+ StartBlock(); // B4
+ Return(Reg(var3, 2));
+ EndBlock();
+
+ const int def_of_v2 = 3;
+ const int call_in_b1 = 4;
+ const int call_in_b2 = 6;
+ const int end_of_b1 = 5;
+ const int end_of_b2 = 7;
+ const int start_of_b3 = 8;
+
+ Allocate();
+ // TODO(mtrofin): at the moment, the linear allocator spills var1 and var2,
+ // so only var3 is spilled in deferred blocks. Greedy avoids spilling 1&2.
+ // Expand the test once greedy is back online with this facility.
+ const int var3_reg = 2;
+ const int var3_slot = 2;
+
+ EXPECT_FALSE(IsParallelMovePresent(def_of_v2, Instruction::START, sequence(),
+ Reg(var3_reg), Slot()));
+ EXPECT_TRUE(IsParallelMovePresent(call_in_b1, Instruction::START, sequence(),
+ Reg(var3_reg), Slot(var3_slot)));
+ EXPECT_TRUE(IsParallelMovePresent(end_of_b1, Instruction::START, sequence(),
+ Slot(var3_slot), Reg()));
+
+ EXPECT_TRUE(IsParallelMovePresent(call_in_b2, Instruction::START, sequence(),
+ Reg(var3_reg), Slot(var3_slot)));
+ EXPECT_TRUE(IsParallelMovePresent(end_of_b2, Instruction::START, sequence(),
+ Slot(var3_slot), Reg()));
+
+
+ EXPECT_EQ(0,
+ GetParallelMoveCount(start_of_b3, Instruction::START, sequence()));
+}
+
+
namespace {
enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };
diff --git a/deps/v8/test/unittests/compiler/scheduler-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
index 45c636b27a..954541b721 100644
--- a/deps/v8/test/unittests/compiler/scheduler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
@@ -215,7 +215,7 @@ TEST_F(SchedulerRPOTest, EntryLoop) {
TEST_F(SchedulerRPOTest, EndLoop) {
Schedule schedule(zone());
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
CheckRPONumbers(order, 3, true);
@@ -225,7 +225,7 @@ TEST_F(SchedulerRPOTest, EndLoop) {
TEST_F(SchedulerRPOTest, EndLoopNested) {
Schedule schedule(zone());
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), schedule.start());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
@@ -406,8 +406,8 @@ TEST_F(SchedulerRPOTest, LoopNest2) {
TEST_F(SchedulerRPOTest, LoopFollow1) {
Schedule schedule(zone());
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+ base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
@@ -427,8 +427,8 @@ TEST_F(SchedulerRPOTest, LoopFollow1) {
TEST_F(SchedulerRPOTest, LoopFollow2) {
Schedule schedule(zone());
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+ base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
BasicBlock* A = schedule.start();
BasicBlock* S = schedule.NewBasicBlock();
@@ -451,8 +451,8 @@ TEST_F(SchedulerRPOTest, LoopFollowN) {
for (int size = 1; size < 5; size++) {
for (int exit = 0; exit < size; exit++) {
Schedule schedule(zone());
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- SmartPointer<TestLoop> loop2(CreateLoop(&schedule, size));
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, size));
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
@@ -472,8 +472,8 @@ TEST_F(SchedulerRPOTest, LoopFollowN) {
TEST_F(SchedulerRPOTest, NestedLoopFollow1) {
Schedule schedule(zone());
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+ base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
BasicBlock* A = schedule.start();
BasicBlock* B = schedule.NewBasicBlock();
@@ -506,7 +506,7 @@ TEST_F(SchedulerRPOTest, LoopBackedges1) {
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessorForTesting(A, loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), E);
@@ -530,7 +530,7 @@ TEST_F(SchedulerRPOTest, LoopOutedges1) {
BasicBlock* D = schedule.NewBasicBlock();
BasicBlock* E = schedule.end();
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessorForTesting(A, loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), E);
@@ -553,7 +553,7 @@ TEST_F(SchedulerRPOTest, LoopOutedges2) {
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessorForTesting(A, loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), E);
@@ -576,7 +576,7 @@ TEST_F(SchedulerRPOTest, LoopOutloops1) {
Schedule schedule(zone());
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
- SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessorForTesting(A, loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), E);
diff --git a/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc b/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
index 449299bb1d..7257cc9802 100644
--- a/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
@@ -27,10 +27,11 @@ class TailCallOptimizationTest : public GraphTest {
TEST_F(TailCallOptimizationTest, CallCodeObject0) {
MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
- LinkageLocation kLocationSignature[] = {LinkageLocation(0),
- LinkageLocation(1)};
+ LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
+ LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
- CallDescriptor::kCallCodeObject, kMachAnyTagged, LinkageLocation(0),
+ CallDescriptor::kCallCodeObject, kMachAnyTagged,
+ LinkageLocation::ForRegister(0),
new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kNoFlags);
@@ -47,10 +48,11 @@ TEST_F(TailCallOptimizationTest, CallCodeObject0) {
TEST_F(TailCallOptimizationTest, CallCodeObject1) {
MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
- LinkageLocation kLocationSignature[] = {LinkageLocation(0),
- LinkageLocation(1)};
+ LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
+ LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
- CallDescriptor::kCallCodeObject, kMachAnyTagged, LinkageLocation(0),
+ CallDescriptor::kCallCodeObject, kMachAnyTagged,
+ LinkageLocation::ForRegister(0),
new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
@@ -71,10 +73,11 @@ TEST_F(TailCallOptimizationTest, CallCodeObject1) {
TEST_F(TailCallOptimizationTest, CallCodeObject2) {
MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
- LinkageLocation kLocationSignature[] = {LinkageLocation(0),
- LinkageLocation(1)};
+ LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
+ LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
- CallDescriptor::kCallCodeObject, kMachAnyTagged, LinkageLocation(0),
+ CallDescriptor::kCallCodeObject, kMachAnyTagged,
+ LinkageLocation::ForRegister(0),
new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
@@ -93,10 +96,11 @@ TEST_F(TailCallOptimizationTest, CallCodeObject2) {
TEST_F(TailCallOptimizationTest, CallJSFunction0) {
MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
- LinkageLocation kLocationSignature[] = {LinkageLocation(0),
- LinkageLocation(1)};
+ LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
+ LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
- CallDescriptor::kCallJSFunction, kMachAnyTagged, LinkageLocation(0),
+ CallDescriptor::kCallJSFunction, kMachAnyTagged,
+ LinkageLocation::ForRegister(0),
new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kNoFlags);
@@ -113,10 +117,11 @@ TEST_F(TailCallOptimizationTest, CallJSFunction0) {
TEST_F(TailCallOptimizationTest, CallJSFunction1) {
MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
- LinkageLocation kLocationSignature[] = {LinkageLocation(0),
- LinkageLocation(1)};
+ LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
+ LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
- CallDescriptor::kCallJSFunction, kMachAnyTagged, LinkageLocation(0),
+ CallDescriptor::kCallJSFunction, kMachAnyTagged,
+ LinkageLocation::ForRegister(0),
new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
@@ -137,10 +142,11 @@ TEST_F(TailCallOptimizationTest, CallJSFunction1) {
TEST_F(TailCallOptimizationTest, CallJSFunction2) {
MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
- LinkageLocation kLocationSignature[] = {LinkageLocation(0),
- LinkageLocation(1)};
+ LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
+ LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
- CallDescriptor::kCallJSFunction, kMachAnyTagged, LinkageLocation(0),
+ CallDescriptor::kCallJSFunction, kMachAnyTagged,
+ LinkageLocation::ForRegister(0),
new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
diff --git a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
index c75fde492e..e74152a5fd 100644
--- a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -156,6 +156,19 @@ TEST_F(GCIdleTimeHandlerTest, DoScavengeLowScavengeSpeed) {
}
+TEST_F(GCIdleTimeHandlerTest, DoScavengeLowAllocationRate) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ heap_state.used_new_space_size = kNewSpaceCapacity;
+ heap_state.new_space_allocation_throughput_in_bytes_per_ms =
+ GCIdleTimeHandler::kLowAllocationThroughput - 1;
+ int idle_time_ms = 16;
+ EXPECT_TRUE(GCIdleTimeHandler::ShouldDoScavenge(
+ idle_time_ms, heap_state.new_space_capacity,
+ heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
+ heap_state.new_space_allocation_throughput_in_bytes_per_ms));
+}
+
+
TEST_F(GCIdleTimeHandlerTest, DoScavengeHighScavengeSpeed) {
GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
heap_state.used_new_space_size = kNewSpaceCapacity;
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
new file mode 100644
index 0000000000..aead34770c
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -0,0 +1,137 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayBuilderTest : public TestWithIsolate {
+ public:
+ BytecodeArrayBuilderTest() {}
+ ~BytecodeArrayBuilderTest() override {}
+};
+
+
+TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
+ BytecodeArrayBuilder builder(isolate());
+
+ builder.set_locals_count(1);
+ CHECK_EQ(builder.locals_count(), 1);
+
+ // Emit constant loads.
+ builder.LoadLiteral(Smi::FromInt(0))
+ .LoadLiteral(Smi::FromInt(8))
+ .LoadUndefined()
+ .LoadNull()
+ .LoadTheHole()
+ .LoadTrue()
+ .LoadFalse();
+
+ // Emit accumulator transfers.
+ Register reg(0);
+ builder.LoadAccumulatorWithRegister(reg).StoreAccumulatorInRegister(reg);
+
+ // Emit binary operators invocations.
+ builder.BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::SUB, reg)
+ .BinaryOperation(Token::Value::MUL, reg)
+ .BinaryOperation(Token::Value::DIV, reg);
+
+ // Emit control flow. Return must be the last instruction.
+ builder.Return();
+
+ // Generate BytecodeArray.
+ Handle<BytecodeArray> the_array = builder.ToBytecodeArray();
+ CHECK_EQ(the_array->frame_size(), builder.locals_count() * kPointerSize);
+
+ // Build scorecard of bytecodes encountered in the BytecodeArray.
+ std::vector<int> scorecard(Bytecodes::ToByte(Bytecode::kLast) + 1);
+ Bytecode final_bytecode = Bytecode::kLdaZero;
+ for (int i = 0; i < the_array->length(); i++) {
+ uint8_t code = the_array->get(i);
+ scorecard[code] += 1;
+ int operands = Bytecodes::NumberOfOperands(Bytecodes::FromByte(code));
+ CHECK_LE(operands, Bytecodes::MaximumNumberOfOperands());
+ final_bytecode = Bytecodes::FromByte(code);
+ i += operands;
+ }
+
+ // Check return occurs at the end and only once in the BytecodeArray.
+ CHECK_EQ(final_bytecode, Bytecode::kReturn);
+ CHECK_EQ(scorecard[Bytecodes::ToByte(final_bytecode)], 1);
+
+#define CHECK_BYTECODE_PRESENT(Name, ...) \
+ /* Check Bytecode is marked in scorecard */ \
+ CHECK_GE(scorecard[Bytecodes::ToByte(Bytecode::k##Name)], 1);
+ BYTECODE_LIST(CHECK_BYTECODE_PRESENT)
+#undef CHECK_BYTECODE_PRESENT
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
+ for (int locals = 0; locals < 5; locals++) {
+ for (int temps = 0; temps < 3; temps++) {
+ BytecodeArrayBuilder builder(isolate());
+ builder.set_locals_count(locals);
+ builder.Return();
+
+ TemporaryRegisterScope temporaries(&builder);
+ for (int i = 0; i < temps; i++) {
+ temporaries.NewRegister();
+ }
+
+ Handle<BytecodeArray> the_array = builder.ToBytecodeArray();
+ int total_registers = locals + temps;
+ CHECK_EQ(the_array->frame_size(), total_registers * kPointerSize);
+ }
+ }
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, TemporariesRecycled) {
+ BytecodeArrayBuilder builder(isolate());
+ builder.set_locals_count(0);
+ builder.Return();
+
+ int first;
+ {
+ TemporaryRegisterScope temporaries(&builder);
+ first = temporaries.NewRegister().index();
+ temporaries.NewRegister();
+ temporaries.NewRegister();
+ temporaries.NewRegister();
+ }
+
+ int second;
+ {
+ TemporaryRegisterScope temporaries(&builder);
+ second = temporaries.NewRegister().index();
+ }
+
+ CHECK_EQ(first, second);
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, RegisterValues) {
+ int index = 1;
+ uint8_t operand = static_cast<uint8_t>(-index);
+
+ Register the_register(index);
+ CHECK_EQ(the_register.index(), index);
+
+ int actual_operand = the_register.ToOperand();
+ CHECK_EQ(actual_operand, operand);
+
+ int actual_index = Register::FromOperand(actual_operand).index();
+ CHECK_EQ(actual_index, index);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index fd52a4b21d..7d04215143 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -4,10 +4,12 @@
#include "test/unittests/test-utils.h"
+#include "include/libplatform/libplatform.h"
#include "src/base/platform/time.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
#include "src/flags.h"
#include "src/isolate.h"
+#include "src/v8.h"
namespace v8 {
@@ -51,6 +53,9 @@ void TestWithIsolate::SetUpTestCase() {
// static
void TestWithIsolate::TearDownTestCase() {
ASSERT_TRUE(isolate_ != NULL);
+ v8::Platform* platform = internal::V8::GetCurrentPlatform();
+ ASSERT_TRUE(platform != NULL);
+ while (platform::PumpMessageLoop(platform, isolate_)) continue;
isolate_->Dispose();
isolate_ = NULL;
delete array_buffer_allocator_;
@@ -75,7 +80,7 @@ inline int64_t GetRandomSeedFromFlag(int random_seed) {
} // namespace
TestWithRandomNumberGenerator::TestWithRandomNumberGenerator()
- : rng_(GetRandomSeedFromFlag(internal::FLAG_random_seed)) {}
+ : rng_(GetRandomSeedFromFlag(::v8::internal::FLAG_random_seed)) {}
TestWithRandomNumberGenerator::~TestWithRandomNumberGenerator() {}
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index 9698fb760d..60a5dea888 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -22,6 +22,10 @@
'include_dirs': [
'../..',
],
+ 'defines': [
+ # TODO(jochen): Remove again after this is globally turned on.
+ 'V8_IMMINENT_DEPRECATION_WARNINGS',
+ ],
'sources': [ ### gcmole(all) ###
'base/bits-unittest.cc',
'base/cpu-unittest.cc',
@@ -39,6 +43,7 @@
'base/utils/random-number-generator-unittest.cc',
'char-predicates-unittest.cc',
'compiler/change-lowering-unittest.cc',
+ 'compiler/coalesced-live-ranges-unittest.cc',
'compiler/common-operator-reducer-unittest.cc',
'compiler/common-operator-unittest.cc',
'compiler/compiler-test-utils.h',
@@ -55,11 +60,15 @@
'compiler/instruction-selector-unittest.h',
'compiler/instruction-sequence-unittest.cc',
'compiler/instruction-sequence-unittest.h',
+ 'compiler/interpreter-assembler-unittest.cc',
+ 'compiler/interpreter-assembler-unittest.h',
'compiler/js-builtin-reducer-unittest.cc',
+ 'compiler/js-context-relaxation-unittest.cc',
'compiler/js-intrinsic-lowering-unittest.cc',
'compiler/js-operator-unittest.cc',
'compiler/js-typed-lowering-unittest.cc',
'compiler/js-type-feedback-unittest.cc',
+ 'compiler/linkage-tail-call-unittest.cc',
'compiler/liveness-analyzer-unittest.cc',
'compiler/load-elimination-unittest.cc',
'compiler/loop-peeling-unittest.cc',
@@ -85,6 +94,7 @@
'compiler/value-numbering-reducer-unittest.cc',
'compiler/zone-pool-unittest.cc',
'counters-unittest.cc',
+ 'interpreter/bytecode-array-builder-unittest.cc',
'libplatform/default-platform-unittest.cc',
'libplatform/task-queue-unittest.cc',
'libplatform/worker-thread-unittest.cc',
@@ -153,6 +163,11 @@
],
},
}],
+ ['v8_wasm!=0', {
+ 'dependencies': [
+ '../../third_party/wasm/test/unittests/wasm/wasm.gyp:wasm_unittests',
+ ],
+ }],
],
},
],
diff --git a/deps/v8/test/webkit/class-syntax-name-expected.txt b/deps/v8/test/webkit/class-syntax-name-expected.txt
index 10f38ff2c2..ed49be3309 100644
--- a/deps/v8/test/webkit/class-syntax-name-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-name-expected.txt
@@ -108,7 +108,7 @@ PASS 'use strict'; var VarA = class A { constructor() {} }; var VarB = class B e
Class statement binding in other circumstances
PASS var result = A; result threw exception ReferenceError: A is not defined.
PASS 'use strict'; var result = A; result threw exception ReferenceError: A is not defined.
-FAIL var result = A; class A {}; result should throw an exception. Was undefined.
+PASS var result = A; class A {}; result threw exception ReferenceError: A is not defined.
PASS 'use strict'; var result = A; class A {}; result threw exception ReferenceError: A is not defined.
PASS class A { constructor() { A = 1; } }; new A threw exception TypeError: Assignment to constant variable..
PASS 'use strict'; class A { constructor() { A = 1; } }; new A threw exception TypeError: Assignment to constant variable..
@@ -118,7 +118,7 @@ PASS class A {}; var result = A; result did not throw exception.
PASS 'use strict'; class A {}; var result = A; result did not throw exception.
PASS eval('var Foo = 10'); Foo is 10
PASS 'use strict'; eval('var Foo = 10'); Foo threw exception ReferenceError: Foo is not defined.
-PASS eval('class Bar { constructor() {} }'); Bar.toString() is 'class Bar { constructor() {} }'
+PASS eval('class Bar { constructor() {} }; Bar.toString()') is 'class Bar { constructor() {} }'
PASS 'use strict'; eval('class Bar { constructor() {} }'); Bar.toString() threw exception ReferenceError: Bar is not defined.
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/class-syntax-name.js b/deps/v8/test/webkit/class-syntax-name.js
index 09faa3a54a..16045651ef 100644
--- a/deps/v8/test/webkit/class-syntax-name.js
+++ b/deps/v8/test/webkit/class-syntax-name.js
@@ -111,5 +111,5 @@ runTestShouldBe("class A { constructor() { } }; A = 1; A", "1");
runTestShouldNotThrow("class A {}; var result = A; result");
shouldBe("eval('var Foo = 10'); Foo", "10");
shouldThrow("'use strict'; eval('var Foo = 10'); Foo");
-shouldBe("eval('class Bar { constructor() {} }'); Bar.toString()", "'class Bar { constructor() {} }'");
+shouldBe("eval('class Bar { constructor() {} }; Bar.toString()')", "'class Bar { constructor() {} }'");
shouldThrow("'use strict'; eval('class Bar { constructor() {} }'); Bar.toString()");
diff --git a/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt b/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
deleted file mode 100644
index 7731a98671..0000000000
--- a/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-Test to ensure correct behaviour of Object.getOwnPropertyNames
-
-On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-
-
-PASS getSortedOwnPropertyNames({}) is []
-PASS getSortedOwnPropertyNames({a:null}) is ['a']
-PASS getSortedOwnPropertyNames({a:null, b:null}) is ['a', 'b']
-PASS getSortedOwnPropertyNames({b:null, a:null}) is ['a', 'b']
-PASS getSortedOwnPropertyNames({__proto__:{a:null}}) is []
-PASS getSortedOwnPropertyNames({__proto__:[1,2,3]}) is []
-PASS getSortedOwnPropertyNames(Object.create({}, { 'a': { 'value': 1, 'enumerable': false } })) is ['a']
-PASS getSortedOwnPropertyNames(Object.create([1,2,3], { 'a': { 'value': 1, 'enumerable': false } })) is ['a']
-PASS getSortedOwnPropertyNames(new Function()) is ['arguments', 'caller', 'length', 'name', 'prototype']
-PASS getSortedOwnPropertyNames((function(){var x=new Function();x.__proto__=[1,2,3];return x;})()) is ['arguments', 'caller', 'length', 'name', 'prototype']
-PASS getSortedOwnPropertyNames(new String('')) is ['length']
-PASS getSortedOwnPropertyNames(new String('a')) is ['0', 'length']
-PASS getSortedOwnPropertyNames(new String('abc')) is ['0', '1', '2', 'length']
-PASS getSortedOwnPropertyNames((function(){var x=new String('');x.__proto__=[1,2,3];return x;})()) is ['length']
-PASS getSortedOwnPropertyNames([]) is ['length']
-PASS getSortedOwnPropertyNames([null]) is ['0', 'length']
-PASS getSortedOwnPropertyNames([null,null]) is ['0','1', 'length']
-PASS getSortedOwnPropertyNames([null,null,,,,null]) is ['0','1','5', 'length']
-PASS getSortedOwnPropertyNames((function(){var x=[];x.__proto__=[1,2,3];return x;})()) is ['length']
-PASS getSortedOwnPropertyNames(new Date()) is []
-PASS getSortedOwnPropertyNames((function(){var x=new Date();x.__proto__=[1,2,3];return x;})()) is []
-PASS getSortedOwnPropertyNames(new RegExp('foo')) is ['global', 'ignoreCase', 'lastIndex', 'multiline', 'source']
-PASS getSortedOwnPropertyNames((function(){var x=new RegExp();x.__proto__=[1,2,3];return x;})()) is ['global', 'ignoreCase', 'lastIndex', 'multiline', 'source']
-PASS getSortedOwnPropertyNames(argumentsObject()) is ['callee', 'length']
-PASS getSortedOwnPropertyNames(argumentsObject(1)) is ['0', 'callee', 'length']
-PASS getSortedOwnPropertyNames(argumentsObject(1,2,3)) is ['0', '1', '2', 'callee', 'length']
-PASS getSortedOwnPropertyNames((function(){arguments.__proto__=[1,2,3];return arguments;})()) is ['callee', 'length']
-PASS getSortedOwnPropertyNames(parseInt) is ['arguments', 'caller', 'length', 'name']
-PASS getSortedOwnPropertyNames(parseFloat) is ['arguments', 'caller', 'length', 'name']
-PASS getSortedOwnPropertyNames(isNaN) is ['arguments', 'caller', 'length', 'name']
-PASS getSortedOwnPropertyNames(isFinite) is ['arguments', 'caller', 'length', 'name']
-PASS getSortedOwnPropertyNames(escape) is ['length', 'name']
-PASS getSortedOwnPropertyNames(unescape) is ['length', 'name']
-PASS getSortedOwnPropertyNames(decodeURI) is ['length', 'name']
-PASS getSortedOwnPropertyNames(decodeURIComponent) is ['length', 'name']
-PASS getSortedOwnPropertyNames(encodeURI) is ['length', 'name']
-PASS getSortedOwnPropertyNames(encodeURIComponent) is ['length', 'name']
-PASS getSortedOwnPropertyNames(Object) is ['arguments', 'assign', 'caller', 'create', 'defineProperties', 'defineProperty', 'deliverChangeRecords', 'freeze', 'getNotifier', 'getOwnPropertyDescriptor', 'getOwnPropertyNames', 'getOwnPropertySymbols', 'getPrototypeOf', 'is', 'isExtensible', 'isFrozen', 'isSealed', 'keys', 'length', 'name', 'observe', 'preventExtensions', 'prototype', 'seal', 'setPrototypeOf', 'unobserve']
-PASS getSortedOwnPropertyNames(Object.prototype) is ['__defineGetter__', '__defineSetter__', '__lookupGetter__', '__lookupSetter__', '__proto__', 'constructor', 'hasOwnProperty', 'isPrototypeOf', 'propertyIsEnumerable', 'toLocaleString', 'toString', 'valueOf']
-PASS getSortedOwnPropertyNames(Function) is ['arguments', 'caller', 'length', 'name', 'prototype']
-PASS getSortedOwnPropertyNames(Function.prototype) is ['apply', 'arguments', 'bind', 'call', 'caller', 'constructor', 'length', 'name', 'toString']
-PASS getSortedOwnPropertyNames(Array) is ['arguments', 'caller', 'from', 'isArray', 'length', 'name', 'observe', 'of', 'prototype', 'unobserve']
-PASS getSortedOwnPropertyNames(Array.prototype) is ['concat', 'constructor', 'copyWithin', 'entries', 'every', 'fill', 'filter', 'find', 'findIndex', 'forEach', 'indexOf', 'join', 'keys', 'lastIndexOf', 'length', 'map', 'pop', 'push', 'reduce', 'reduceRight', 'reverse', 'shift', 'slice', 'some', 'sort', 'splice', 'toLocaleString', 'toString', 'unshift']
-PASS getSortedOwnPropertyNames(String) is ['arguments', 'caller', 'fromCharCode', 'fromCodePoint', 'length', 'name', 'prototype', 'raw']
-PASS getSortedOwnPropertyNames(String.prototype) is ['anchor', 'big', 'blink', 'bold', 'charAt', 'charCodeAt', 'codePointAt', 'concat', 'constructor', 'endsWith', 'fixed', 'fontcolor', 'fontsize', 'includes', 'indexOf', 'italics', 'lastIndexOf', 'length', 'link', 'localeCompare', 'match', 'normalize', 'repeat', 'replace', 'search', 'slice', 'small', 'split', 'startsWith', 'strike', 'sub', 'substr', 'substring', 'sup', 'toLocaleLowerCase', 'toLocaleUpperCase', 'toLowerCase', 'toString', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', 'valueOf']
-PASS getSortedOwnPropertyNames(Boolean) is ['arguments', 'caller', 'length', 'name', 'prototype']
-PASS getSortedOwnPropertyNames(Boolean.prototype) is ['constructor', 'toString', 'valueOf']
-PASS getSortedOwnPropertyNames(Number) is ['EPSILON', 'MAX_SAFE_INTEGER', 'MAX_VALUE', 'MIN_SAFE_INTEGER', 'MIN_VALUE', 'NEGATIVE_INFINITY', 'NaN', 'POSITIVE_INFINITY', 'arguments', 'caller', 'isFinite', 'isInteger', 'isNaN', 'isSafeInteger', 'length', 'name', 'parseFloat', 'parseInt', 'prototype']
-PASS getSortedOwnPropertyNames(Number.prototype) is ['constructor', 'toExponential', 'toFixed', 'toLocaleString', 'toPrecision', 'toString', 'valueOf']
-PASS getSortedOwnPropertyNames(Date) is ['UTC', 'arguments', 'caller', 'length', 'name', 'now', 'parse', 'prototype']
-PASS getSortedOwnPropertyNames(Date.prototype) is ['constructor', 'getDate', 'getDay', 'getFullYear', 'getHours', 'getMilliseconds', 'getMinutes', 'getMonth', 'getSeconds', 'getTime', 'getTimezoneOffset', 'getUTCDate', 'getUTCDay', 'getUTCFullYear', 'getUTCHours', 'getUTCMilliseconds', 'getUTCMinutes', 'getUTCMonth', 'getUTCSeconds', 'getYear', 'setDate', 'setFullYear', 'setHours', 'setMilliseconds', 'setMinutes', 'setMonth', 'setSeconds', 'setTime', 'setUTCDate', 'setUTCFullYear', 'setUTCHours', 'setUTCMilliseconds', 'setUTCMinutes', 'setUTCMonth', 'setUTCSeconds', 'setYear', 'toDateString', 'toGMTString', 'toISOString', 'toJSON', 'toLocaleDateString', 'toLocaleString', 'toLocaleTimeString', 'toString', 'toTimeString', 'toUTCString', 'valueOf']
-PASS getSortedOwnPropertyNames(RegExp) is ['$&', "$'", '$*', '$+', '$1', '$2', '$3', '$4', '$5', '$6', '$7', '$8', '$9', '$_', '$`', 'arguments', 'caller', 'input', 'lastMatch', 'lastParen', 'leftContext', 'length', 'multiline', 'name', 'prototype', 'rightContext']
-PASS getSortedOwnPropertyNames(RegExp.prototype) is ['compile', 'constructor', 'exec', 'global', 'ignoreCase', 'lastIndex', 'multiline', 'source', 'test', 'toString']
-PASS getSortedOwnPropertyNames(Error) is ['arguments', 'caller', 'captureStackTrace', 'length', 'name', 'prototype', 'stackTraceLimit']
-PASS getSortedOwnPropertyNames(Error.prototype) is ['constructor', 'message', 'name', 'toString']
-PASS getSortedOwnPropertyNames(Math) is ['E', 'LN10', 'LN2', 'LOG10E', 'LOG2E', 'PI', 'SQRT1_2', 'SQRT2', 'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'cbrt', 'ceil', 'clz32', 'cos', 'cosh', 'exp', 'expm1', 'floor', 'fround', 'hypot', 'imul', 'log', 'log10', 'log1p', 'log2', 'max', 'min', 'pow', 'random', 'round', 'sign', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc']
-PASS getSortedOwnPropertyNames(JSON) is ['parse', 'stringify']
-PASS globalPropertyNames.indexOf('NaN') != -1 is true
-PASS globalPropertyNames.indexOf('Infinity') != -1 is true
-PASS globalPropertyNames.indexOf('undefined') != -1 is true
-PASS globalPropertyNames.indexOf('parseInt') != -1 is true
-PASS globalPropertyNames.indexOf('parseFloat') != -1 is true
-PASS globalPropertyNames.indexOf('isNaN') != -1 is true
-PASS globalPropertyNames.indexOf('isFinite') != -1 is true
-PASS globalPropertyNames.indexOf('escape') != -1 is true
-PASS globalPropertyNames.indexOf('unescape') != -1 is true
-PASS globalPropertyNames.indexOf('decodeURI') != -1 is true
-PASS globalPropertyNames.indexOf('decodeURIComponent') != -1 is true
-PASS globalPropertyNames.indexOf('encodeURI') != -1 is true
-PASS globalPropertyNames.indexOf('encodeURIComponent') != -1 is true
-PASS globalPropertyNames.indexOf('Object') != -1 is true
-PASS globalPropertyNames.indexOf('Function') != -1 is true
-PASS globalPropertyNames.indexOf('Array') != -1 is true
-PASS globalPropertyNames.indexOf('String') != -1 is true
-PASS globalPropertyNames.indexOf('Boolean') != -1 is true
-PASS globalPropertyNames.indexOf('Number') != -1 is true
-PASS globalPropertyNames.indexOf('Date') != -1 is true
-PASS globalPropertyNames.indexOf('RegExp') != -1 is true
-PASS globalPropertyNames.indexOf('Error') != -1 is true
-PASS globalPropertyNames.indexOf('Math') != -1 is true
-PASS globalPropertyNames.indexOf('JSON') != -1 is true
-PASS successfullyParsed is true
-
-TEST COMPLETE
-
diff --git a/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames.js b/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames.js
deleted file mode 100644
index e34562f5ba..0000000000
--- a/deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames.js
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-description("Test to ensure correct behaviour of Object.getOwnPropertyNames");
-
-function argumentsObject() { return arguments; };
-
-var expectedPropertyNamesSet = {
- "{}": "[]",
- "{a:null}": "['a']",
- "{a:null, b:null}": "['a', 'b']",
- "{b:null, a:null}": "['a', 'b']",
- "{__proto__:{a:null}}": "[]",
- "{__proto__:[1,2,3]}": "[]",
- "Object.create({}, { 'a': { 'value': 1, 'enumerable': false } })": "['a']",
- "Object.create([1,2,3], { 'a': { 'value': 1, 'enumerable': false } })": "['a']",
-// Function objects
- "new Function()": "['arguments', 'caller', 'length', 'name', 'prototype']",
- "(function(){var x=new Function();x.__proto__=[1,2,3];return x;})()": "['arguments', 'caller', 'length', 'name', 'prototype']",
-// String objects
- "new String('')": "['length']",
- "new String('a')": "['0', 'length']",
- "new String('abc')": "['0', '1', '2', 'length']",
- "(function(){var x=new String('');x.__proto__=[1,2,3];return x;})()": "['length']",
-// Array objects
- "[]": "['length']",
- "[null]": "['0', 'length']",
- "[null,null]": "['0','1', 'length']",
- "[null,null,,,,null]": "['0','1','5', 'length']",
- "(function(){var x=[];x.__proto__=[1,2,3];return x;})()": "['length']",
-// Date objects
- "new Date()": "[]",
- "(function(){var x=new Date();x.__proto__=[1,2,3];return x;})()": "[]",
-// RegExp objects
- "new RegExp('foo')": "['global', 'ignoreCase', 'lastIndex', 'multiline', 'source']",
- "(function(){var x=new RegExp();x.__proto__=[1,2,3];return x;})()": "['global', 'ignoreCase', 'lastIndex', 'multiline', 'source']",
-// Arguments objects
- "argumentsObject()": "['callee', 'length']",
- "argumentsObject(1)": "['0', 'callee', 'length']",
- "argumentsObject(1,2,3)": "['0', '1', '2', 'callee', 'length']",
- "(function(){arguments.__proto__=[1,2,3];return arguments;})()": "['callee', 'length']",
-// Built-in ECMA functions
- "parseInt": "['arguments', 'caller', 'length', 'name']",
- "parseFloat": "['arguments', 'caller', 'length', 'name']",
- "isNaN": "['arguments', 'caller', 'length', 'name']",
- "isFinite": "['arguments', 'caller', 'length', 'name']",
- "escape": "['length', 'name']",
- "unescape": "['length', 'name']",
- "decodeURI": "['length', 'name']",
- "decodeURIComponent": "['length', 'name']",
- "encodeURI": "['length', 'name']",
- "encodeURIComponent": "['length', 'name']",
-// Built-in ECMA objects
- "Object": "['arguments', 'assign', 'caller', 'create', 'defineProperties', 'defineProperty', 'deliverChangeRecords', 'freeze', 'getNotifier', 'getOwnPropertyDescriptor', 'getOwnPropertyNames', 'getOwnPropertySymbols', 'getPrototypeOf', 'is', 'isExtensible', 'isFrozen', 'isSealed', 'keys', 'length', 'name', 'observe', 'preventExtensions', 'prototype', 'seal', 'setPrototypeOf', 'unobserve']",
- "Object.prototype": "['__defineGetter__', '__defineSetter__', '__lookupGetter__', '__lookupSetter__', '__proto__', 'constructor', 'hasOwnProperty', 'isPrototypeOf', 'propertyIsEnumerable', 'toLocaleString', 'toString', 'valueOf']",
- "Function": "['arguments', 'caller', 'length', 'name', 'prototype']",
- "Function.prototype": "['apply', 'arguments', 'bind', 'call', 'caller', 'constructor', 'length', 'name', 'toString']",
- "Array": "['arguments', 'caller', 'from', 'isArray', 'length', 'name', 'observe', 'of', 'prototype', 'unobserve']",
- "Array.prototype": "['concat', 'constructor', 'copyWithin', 'entries', 'every', 'fill', 'filter', 'find', 'findIndex', 'forEach', 'indexOf', 'join', 'keys', 'lastIndexOf', 'length', 'map', 'pop', 'push', 'reduce', 'reduceRight', 'reverse', 'shift', 'slice', 'some', 'sort', 'splice', 'toLocaleString', 'toString', 'unshift']",
- "String": "['arguments', 'caller', 'fromCharCode', 'fromCodePoint', 'length', 'name', 'prototype', 'raw']",
- "String.prototype": "['anchor', 'big', 'blink', 'bold', 'charAt', 'charCodeAt', 'codePointAt', 'concat', 'constructor', 'endsWith', 'fixed', 'fontcolor', 'fontsize', 'includes', 'indexOf', 'italics', 'lastIndexOf', 'length', 'link', 'localeCompare', 'match', 'normalize', 'repeat', 'replace', 'search', 'slice', 'small', 'split', 'startsWith', 'strike', 'sub', 'substr', 'substring', 'sup', 'toLocaleLowerCase', 'toLocaleUpperCase', 'toLowerCase', 'toString', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', 'valueOf']",
- "Boolean": "['arguments', 'caller', 'length', 'name', 'prototype']",
- "Boolean.prototype": "['constructor', 'toString', 'valueOf']",
- "Number": "['EPSILON', 'MAX_SAFE_INTEGER', 'MAX_VALUE', 'MIN_SAFE_INTEGER', 'MIN_VALUE', 'NEGATIVE_INFINITY', 'NaN', 'POSITIVE_INFINITY', 'arguments', 'caller', 'isFinite', 'isInteger', 'isNaN', 'isSafeInteger', 'length', 'name', 'parseFloat', 'parseInt', 'prototype']",
- "Number.prototype": "['constructor', 'toExponential', 'toFixed', 'toLocaleString', 'toPrecision', 'toString', 'valueOf']",
- "Date": "['UTC', 'arguments', 'caller', 'length', 'name', 'now', 'parse', 'prototype']",
- "Date.prototype": "['constructor', 'getDate', 'getDay', 'getFullYear', 'getHours', 'getMilliseconds', 'getMinutes', 'getMonth', 'getSeconds', 'getTime', 'getTimezoneOffset', 'getUTCDate', 'getUTCDay', 'getUTCFullYear', 'getUTCHours', 'getUTCMilliseconds', 'getUTCMinutes', 'getUTCMonth', 'getUTCSeconds', 'getYear', 'setDate', 'setFullYear', 'setHours', 'setMilliseconds', 'setMinutes', 'setMonth', 'setSeconds', 'setTime', 'setUTCDate', 'setUTCFullYear', 'setUTCHours', 'setUTCMilliseconds', 'setUTCMinutes', 'setUTCMonth', 'setUTCSeconds', 'setYear', 'toDateString', 'toGMTString', 'toISOString', 'toJSON', 'toLocaleDateString', 'toLocaleString', 'toLocaleTimeString', 'toString', 'toTimeString', 'toUTCString', 'valueOf']",
- "RegExp": "['$&', \"$'\", '$*', '$+', '$1', '$2', '$3', '$4', '$5', '$6', '$7', '$8', '$9', '$_', '$`', 'arguments', 'caller', 'input', 'lastMatch', 'lastParen', 'leftContext', 'length', 'multiline', 'name', 'prototype', 'rightContext']",
- "RegExp.prototype": "['compile', 'constructor', 'exec', 'global', 'ignoreCase', 'lastIndex', 'multiline', 'source', 'test', 'toString']",
- "Error": "['arguments', 'caller', 'captureStackTrace', 'length', 'name', 'prototype', 'stackTraceLimit']",
- "Error.prototype": "['constructor', 'message', 'name', 'toString']",
- "Math": "['E', 'LN10', 'LN2', 'LOG10E', 'LOG2E', 'PI', 'SQRT1_2', 'SQRT2', 'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'cbrt', 'ceil', 'clz32', 'cos', 'cosh', 'exp', 'expm1', 'floor', 'fround', 'hypot', 'imul', 'log', 'log10', 'log1p', 'log2', 'max', 'min', 'pow', 'random', 'round', 'sign', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc']",
- "JSON": "['parse', 'stringify']"
-};
-
-function getSortedOwnPropertyNames(obj)
-{
- return Object.getOwnPropertyNames(obj).sort();
-}
-
-for (var expr in expectedPropertyNamesSet)
- shouldBe("getSortedOwnPropertyNames(" + expr + ")", expectedPropertyNamesSet[expr]);
-
-// Global Object
-// Only check for ECMA properties here
-var globalPropertyNames = Object.getOwnPropertyNames(this);
-var expectedGlobalPropertyNames = [
- "NaN",
- "Infinity",
- "undefined",
- "parseInt",
- "parseFloat",
- "isNaN",
- "isFinite",
- "escape",
- "unescape",
- "decodeURI",
- "decodeURIComponent",
- "encodeURI",
- "encodeURIComponent",
- "Object",
- "Function",
- "Array",
- "String",
- "Boolean",
- "Number",
- "Date",
- "RegExp",
- "Error",
- "Math",
- "JSON"
-];
-
-for (var i = 0; i < expectedGlobalPropertyNames.length; ++i)
- shouldBeTrue("globalPropertyNames.indexOf('" + expectedGlobalPropertyNames[i] + "') != -1");
diff --git a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
index 1fcf0c1acb..d5fdeded68 100644
--- a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
+++ b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
@@ -69,8 +69,8 @@ PASS (function (){'use strict'; try{}catch(eval){}}) threw exception SyntaxError
PASS (function(){(function (){'use strict'; try{}catch(eval){}})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS (function (){'use strict'; try{}catch(arguments){}}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS (function(){(function (){'use strict'; try{}catch(arguments){}})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function (a, a){'use strict';}) threw exception SyntaxError: Strict mode function may not have duplicate parameter names.
-PASS (function(){(function (a, a){'use strict';})}) threw exception SyntaxError: Strict mode function may not have duplicate parameter names.
+PASS (function (a, a){'use strict';}) threw exception SyntaxError: Duplicate parameter name not allowed in this context.
+PASS (function(){(function (a, a){'use strict';})}) threw exception SyntaxError: Duplicate parameter name not allowed in this context.
PASS (function (a){'use strict'; delete a;})() threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
PASS (function(){(function (a){'use strict'; delete a;})()}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
PASS (function (){'use strict'; var a; delete a;})() threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
diff --git a/deps/v8/test/webkit/fast/js/excessive-comma-usage-expected.txt b/deps/v8/test/webkit/fast/js/excessive-comma-usage-expected.txt
index 2460768e4a..a86a1e87b1 100644
--- a/deps/v8/test/webkit/fast/js/excessive-comma-usage-expected.txt
+++ b/deps/v8/test/webkit/fast/js/excessive-comma-usage-expected.txt
@@ -28,7 +28,7 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS new Function(initializerTestString)() is true
PASS new Function(declarationTestString)() is true
-FAIL new Function(commaExpressionTestString)() should be true. Threw exception RangeError: Maximum call stack size exceeded
+PASS new Function(commaExpressionTestString)() is true
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/fast/js/excessive-comma-usage.js b/deps/v8/test/webkit/fast/js/excessive-comma-usage.js
index d30ff6d0f8..414b29b7ef 100644
--- a/deps/v8/test/webkit/fast/js/excessive-comma-usage.js
+++ b/deps/v8/test/webkit/fast/js/excessive-comma-usage.js
@@ -24,17 +24,17 @@
description("Test that we can handle excessively large initializer lists");
var initializerTestString = "var a=0";
-for (var i = 0; i < 50000; i++)
+for (var i = 0; i < 5000; i++)
initializerTestString += ",a"+i+"="+i;
initializerTestString += ";return true;";
var declarationTestString = "var a";
-for (var i = 0; i < 50000; i++)
+for (var i = 0; i < 5000; i++)
declarationTestString += ",a"+i;
declarationTestString += ";return true;";
var commaExpressionTestString = "1";
-for (var i = 0; i < 50000; i++)
+for (var i = 0; i < 5000; i++)
commaExpressionTestString += ",1";
commaExpressionTestString += ";return true;";
diff --git a/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt b/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
index 503bec9687..0f4c4cf9a0 100644
--- a/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
+++ b/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
@@ -422,36 +422,36 @@ PASS Valid: "for (a() in b) break"
PASS Valid: "function f() { for (a() in b) break }"
PASS Valid: "for (a().l[4] in b) break"
PASS Valid: "function f() { for (a().l[4] in b) break }"
-PASS Valid: "for (new a in b in c in d) break"
-PASS Valid: "function f() { for (new a in b in c in d) break }"
-PASS Valid: "for (new new new a in b) break"
-PASS Valid: "function f() { for (new new new a in b) break }"
-FAIL Invalid: "for (delete new a() in b) break" should throw undefined
-FAIL Invalid: "function f() { for (delete new a() in b) break }" should throw undefined
-FAIL Invalid: "for (a * a in b) break" should throw undefined
-FAIL Invalid: "function f() { for (a * a in b) break }" should throw undefined
-PASS Valid: "for ((a * a) in b) break"
-PASS Valid: "function f() { for ((a * a) in b) break }"
-FAIL Invalid: "for (a++ in b) break" should throw undefined
-FAIL Invalid: "function f() { for (a++ in b) break }" should throw undefined
-PASS Valid: "for ((a++) in b) break"
-PASS Valid: "function f() { for ((a++) in b) break }"
-FAIL Invalid: "for (++a in b) break" should throw undefined
-FAIL Invalid: "function f() { for (++a in b) break }" should throw undefined
-PASS Valid: "for ((++a) in b) break"
-PASS Valid: "function f() { for ((++a) in b) break }"
-FAIL Invalid: "for (a, b in c) break" should throw undefined
-FAIL Invalid: "function f() { for (a, b in c) break }" should throw undefined
-FAIL Invalid: "for (a,b in c ;;) break" should throw undefined
-FAIL Invalid: "function f() { for (a,b in c ;;) break }" should throw undefined
+PASS Invalid: "for (new a in b in c in d) break"
+PASS Invalid: "function f() { for (new a in b in c in d) break }"
+PASS Invalid: "for (new new new a in b) break"
+PASS Invalid: "function f() { for (new new new a in b) break }"
+PASS Invalid: "for (delete new a() in b) break"
+PASS Invalid: "function f() { for (delete new a() in b) break }"
+PASS Invalid: "for (a * a in b) break"
+PASS Invalid: "function f() { for (a * a in b) break }"
+PASS Invalid: "for ((a * a) in b) break"
+PASS Invalid: "function f() { for ((a * a) in b) break }"
+PASS Invalid: "for (a++ in b) break"
+PASS Invalid: "function f() { for (a++ in b) break }"
+PASS Invalid: "for ((a++) in b) break"
+PASS Invalid: "function f() { for ((a++) in b) break }"
+PASS Invalid: "for (++a in b) break"
+PASS Invalid: "function f() { for (++a in b) break }"
+PASS Invalid: "for ((++a) in b) break"
+PASS Invalid: "function f() { for ((++a) in b) break }"
+PASS Invalid: "for (a, b in c) break"
+PASS Invalid: "function f() { for (a, b in c) break }"
+PASS Invalid: "for (a,b in c ;;) break"
+PASS Invalid: "function f() { for (a,b in c ;;) break }"
PASS Valid: "for (a,(b in c) ;;) break"
PASS Valid: "function f() { for (a,(b in c) ;;) break }"
-PASS Valid: "for ((a, b) in c) break"
-PASS Valid: "function f() { for ((a, b) in c) break }"
-FAIL Invalid: "for (a ? b : c in c) break" should throw undefined
-FAIL Invalid: "function f() { for (a ? b : c in c) break }" should throw undefined
-PASS Valid: "for ((a ? b : c) in c) break"
-PASS Valid: "function f() { for ((a ? b : c) in c) break }"
+PASS Invalid: "for ((a, b) in c) break"
+PASS Invalid: "function f() { for ((a, b) in c) break }"
+PASS Invalid: "for (a ? b : c in c) break"
+PASS Invalid: "function f() { for (a ? b : c in c) break }"
+PASS Invalid: "for ((a ? b : c) in c) break"
+PASS Invalid: "function f() { for ((a ? b : c) in c) break }"
PASS Valid: "for (var a in b in c) break"
PASS Valid: "function f() { for (var a in b in c) break }"
PASS Valid: "for (var a = 5 += 6 in b) break"
diff --git a/deps/v8/test/webkit/fast/js/parser-syntax-check.js b/deps/v8/test/webkit/fast/js/parser-syntax-check.js
index a1b0e924d7..a3fef13474 100644
--- a/deps/v8/test/webkit/fast/js/parser-syntax-check.js
+++ b/deps/v8/test/webkit/fast/js/parser-syntax-check.js
@@ -291,21 +291,21 @@ invalid("for ( %a ; ; ) { }");
valid ("for (a in b) break");
valid ("for (a() in b) break");
valid ("for (a().l[4] in b) break");
-valid ("for (new a in b in c in d) break");
-valid ("for (new new new a in b) break");
+invalid("for (new a in b in c in d) break");
+invalid("for (new new new a in b) break");
invalid("for (delete new a() in b) break");
invalid("for (a * a in b) break");
-valid ("for ((a * a) in b) break");
+invalid("for ((a * a) in b) break");
invalid("for (a++ in b) break");
-valid ("for ((a++) in b) break");
+invalid("for ((a++) in b) break");
invalid("for (++a in b) break");
-valid ("for ((++a) in b) break");
+invalid("for ((++a) in b) break");
invalid("for (a, b in c) break");
invalid("for (a,b in c ;;) break");
valid ("for (a,(b in c) ;;) break");
-valid ("for ((a, b) in c) break");
+invalid("for ((a, b) in c) break");
invalid("for (a ? b : c in c) break");
-valid ("for ((a ? b : c) in c) break");
+invalid("for ((a ? b : c) in c) break");
valid ("for (var a in b in c) break");
valid ("for (var a = 5 += 6 in b) break");
invalid("for (var a += 5 in b) break");
diff --git a/deps/v8/test/webkit/fast/js/primitive-property-access-edge-cases-expected.txt b/deps/v8/test/webkit/fast/js/primitive-property-access-edge-cases-expected.txt
index df66f4eba4..e86d26a83c 100644
--- a/deps/v8/test/webkit/fast/js/primitive-property-access-edge-cases-expected.txt
+++ b/deps/v8/test/webkit/fast/js/primitive-property-access-edge-cases-expected.txt
@@ -53,15 +53,15 @@ PASS checkWriteStrict(true, Boolean) threw exception TypeError: Cannot assign to
PASS checkNumericGet(1, Number) is true
PASS checkNumericGet('hello', String) is true
PASS checkNumericGet(true, Boolean) is true
-FAIL checkNumericSet(1, Number) should be true. Was false.
-FAIL checkNumericSet('hello', String) should be true. Was false.
-FAIL checkNumericSet(true, Boolean) should be true. Was false.
+PASS checkNumericSet(1, Number) is true
+PASS checkNumericSet('hello', String) is true
+PASS checkNumericSet(true, Boolean) is true
PASS checkNumericGetStrict(1, Number) is true
PASS checkNumericGetStrict('hello', String) is true
PASS checkNumericGetStrict(true, Boolean) is true
-FAIL checkNumericSetStrict(1, Number) should be true. Was false.
-FAIL checkNumericSetStrict('hello', String) should be true. Was false.
-FAIL checkNumericSetStrict(true, Boolean) should be true. Was false.
+PASS checkNumericSetStrict(1, Number) is true
+PASS checkNumericSetStrict('hello', String) is true
+PASS checkNumericSetStrict(true, Boolean) is true
PASS checkNumericRead(1, Number) is true
PASS checkNumericRead('hello', String) is true
PASS checkNumericRead(true, Boolean) is true
@@ -71,9 +71,9 @@ PASS checkNumericWrite(true, Boolean) is true
PASS checkNumericReadStrict(1, Number) is true
PASS checkNumericReadStrict('hello', String) is true
PASS checkNumericReadStrict(true, Boolean) is true
-FAIL checkNumericWriteStrict(1, Number) should throw an exception. Was true.
-FAIL checkNumericWriteStrict('hello', String) should throw an exception. Was true.
-FAIL checkNumericWriteStrict(true, Boolean) should throw an exception. Was true.
+PASS checkNumericWriteStrict(1, Number) threw exception TypeError: Cannot assign to read only property '42' of 1.
+PASS checkNumericWriteStrict('hello', String) threw exception TypeError: Cannot assign to read only property '42' of hello.
+PASS checkNumericWriteStrict(true, Boolean) threw exception TypeError: Cannot assign to read only property '42' of true.
PASS didNotCrash is true
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/function-apply-aliased-expected.txt b/deps/v8/test/webkit/function-apply-aliased-expected.txt
index b6c6c86607..8007e1a546 100644
--- a/deps/v8/test/webkit/function-apply-aliased-expected.txt
+++ b/deps/v8/test/webkit/function-apply-aliased-expected.txt
@@ -45,7 +45,7 @@ PASS myFunctionWithApply.aliasedApply(myObject, ['arg1']) is [myObject, "myFunct
PASS myFunctionWithApply.apply(myObject, arg1Array) is [myFunctionWithApply, "myFunctionWithApply.apply", myObject]
PASS forwarder(myFunctionWithApply, myObject, arg1Array) is [myFunctionWithApply, "myFunctionWithApply.apply", myObject]
PASS myFunctionWithApply.aliasedApply(myObject, arg1Array) is [myObject, "myFunctionWithApply", "arg1"]
-PASS myFunction.apply(null, new Array(5000000)) threw exception RangeError: Maximum call stack size exceeded.
+PASS myFunction.apply(null, new Array(500000)) threw exception RangeError: Maximum call stack size exceeded.
PASS myFunction.apply(null, new Array(1 << 30)) threw exception RangeError: Maximum call stack size exceeded.
PASS recurseArguments.apply(null, new Array(50000)) threw exception RangeError: Maximum call stack size exceeded.
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/function-apply-aliased.js b/deps/v8/test/webkit/function-apply-aliased.js
index cda3b1bc60..a6a7ff4533 100644
--- a/deps/v8/test/webkit/function-apply-aliased.js
+++ b/deps/v8/test/webkit/function-apply-aliased.js
@@ -20,6 +20,7 @@
// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --stack-size=100
description(
"This tests that we can correctly call Function.prototype.apply"
@@ -73,7 +74,7 @@ function stackOverflowTest() {
stackOverflowTest();
} catch(e) {
// Blow the stack with a sparse array
- shouldThrow("myFunction.apply(null, new Array(5000000))");
+ shouldThrow("myFunction.apply(null, new Array(500000))");
// Blow the stack with a sparse array that is sufficiently large to cause int overflow
shouldThrow("myFunction.apply(null, new Array(1 << 30))");
}
diff --git a/deps/v8/tools/check-inline-includes.sh b/deps/v8/tools/check-inline-includes.sh
new file mode 100755
index 0000000000..536afb1dd4
--- /dev/null
+++ b/deps/v8/tools/check-inline-includes.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+headers=$(find "$v8_root/src" -name '*.h' -not -name '*-inl.h')
+
+for header in $headers; do
+ inline_header_include=$(grep '#include ".*-inl.h"' "$header")
+ if [ -n "$inline_header_include" ]; then
+ echo "The following non-inline header seems to include an inline header:"
+ echo " Header : $header"
+ echo " Include: $inline_header_include"
+ echo
+ fi
+done
+
+echo "Kthxbye."
diff --git a/deps/v8/tools/check-unused-bailouts.sh b/deps/v8/tools/check-unused-bailouts.sh
new file mode 100755
index 0000000000..da4d4a7f46
--- /dev/null
+++ b/deps/v8/tools/check-unused-bailouts.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+bailouts=$(grep -oP 'V\(\K(k[^,]*)' "$v8_root/src/bailout-reason.h")
+
+for bailout in $bailouts; do
+ bailout_uses=$(grep -r $bailout "$v8_root/src" "$v8_root/test/cctest" | wc -l)
+ if [ $bailout_uses -eq "1" ]; then
+ echo "Bailout reason \"$bailout\" seems to be unused."
+ fi
+done
+
+echo "Kthxbye."
diff --git a/deps/v8/tools/external-reference-check.py b/deps/v8/tools/external-reference-check.py
index bced8d478d..287eca4251 100644
--- a/deps/v8/tools/external-reference-check.py
+++ b/deps/v8/tools/external-reference-check.py
@@ -16,6 +16,7 @@ WORKSPACE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
# Ignore those.
BLACKLISTED = [
+ "fixed_typed_array_base_data_offset",
"page_flags",
"math_exp_constants",
"math_exp_log_table",
diff --git a/deps/v8/tools/gdb-v8-support.py b/deps/v8/tools/gdb-v8-support.py
index 8f5ff5bb5e..5d26146fc7 100644
--- a/deps/v8/tools/gdb-v8-support.py
+++ b/deps/v8/tools/gdb-v8-support.py
@@ -25,6 +25,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import re
kSmiTag = 0
kSmiTagSize = 1
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index 72030e269a..5e6af9d6a8 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -20,6 +20,15 @@ Print a v8 Code object from an internal code address
Usage: jco pc
end
+# Print TypeFeedbackVector
+define jfv
+print ((v8::internal::TypeFeedbackVector*)($arg0))->Print()
+end
+document jfv
+Print a v8 TypeFeedbackVector object
+Usage: jfv tagged_ptr
+end
+
# Print DescriptorArray.
define jda
print ((v8::internal::DescriptorArray*)($arg0))->Print()
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 0f90b26698..29416cebd2 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -132,15 +132,6 @@ consts_misc = [
'value': 'JavaScriptFrameConstants::kFunctionOffset' },
{ 'name': 'off_fp_args',
'value': 'JavaScriptFrameConstants::kLastParameterOffset' },
-
- { 'name': 'scopeinfo_idx_nparams',
- 'value': 'ScopeInfo::kParameterCount' },
- { 'name': 'scopeinfo_idx_nstacklocals',
- 'value': 'ScopeInfo::kStackLocalCount' },
- { 'name': 'scopeinfo_idx_ncontextlocals',
- 'value': 'ScopeInfo::kContextLocalCount' },
- { 'name': 'scopeinfo_idx_first_vars',
- 'value': 'ScopeInfo::kVariablePartIndex' },
];
#
@@ -150,13 +141,12 @@ extras_accessors = [
'HeapObject, map, Map, kMapOffset',
'JSObject, elements, Object, kElementsOffset',
'FixedArray, data, uintptr_t, kHeaderSize',
- 'JSTypedArray, length, Object, kLengthOffset',
'Map, instance_attributes, int, kInstanceAttributesOffset',
- 'Map, inobject_properties, int, kInObjectPropertiesOffset',
+ 'Map, inobject_properties_of_constructor_function_index_offset, int, kInObjectPropertiesOrConstructorFunctionIndexOffset',
'Map, instance_size, int, kInstanceSizeOffset',
'Map, bit_field, char, kBitFieldOffset',
'Map, bit_field2, char, kBitField2Offset',
- 'Map, bit_field3, int, kBitField3Offset',
+ 'Map, bit_field3, SMI, kBitField3Offset',
'Map, prototype, Object, kPrototypeOffset',
'NameDictionaryShape, prefix_size, int, kPrefixSize',
'NameDictionaryShape, entry_size, int, kEntrySize',
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 400991c43f..1e5705d7a5 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -179,6 +179,7 @@
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
'<(INTERMEDIATE_DIR)/snapshot.cc',
@@ -224,6 +225,7 @@
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
'../../src/snapshot/snapshot-empty.cc',
@@ -419,13 +421,13 @@
'../../src/bootstrapper.h',
'../../src/builtins.cc',
'../../src/builtins.h',
- '../../src/bytecodes-irregexp.h',
'../../src/cached-powers.cc',
'../../src/cached-powers.h',
+ '../../src/cancelable-task.cc',
+ '../../src/cancelable-task.h',
'../../src/char-predicates.cc',
'../../src/char-predicates-inl.h',
'../../src/char-predicates.h',
- '../../src/checks.cc',
'../../src/checks.h',
'../../src/circular-queue-inl.h',
'../../src/circular-queue.h',
@@ -455,6 +457,7 @@
'../../src/compiler/basic-block-instrumentor.h',
'../../src/compiler/change-lowering.cc',
'../../src/compiler/change-lowering.h',
+ '../../src/compiler/c-linkage.cc',
'../../src/compiler/coalesced-live-ranges.cc',
'../../src/compiler/coalesced-live-ranges.h',
'../../src/compiler/code-generator-impl.h',
@@ -475,6 +478,7 @@
'../../src/compiler/dead-code-elimination.cc',
'../../src/compiler/dead-code-elimination.h',
'../../src/compiler/diamond.h',
+ '../../src/compiler/frame.cc',
'../../src/compiler/frame.h',
'../../src/compiler/frame-elider.cc',
'../../src/compiler/frame-elider.h',
@@ -482,7 +486,6 @@
"../../src/compiler/frame-states.h",
'../../src/compiler/gap-resolver.cc',
'../../src/compiler/gap-resolver.h',
- '../../src/compiler/graph-builder.h',
'../../src/compiler/graph-reducer.cc',
'../../src/compiler/graph-reducer.h',
'../../src/compiler/graph-replay.cc',
@@ -501,8 +504,12 @@
'../../src/compiler/instruction-selector.h',
'../../src/compiler/instruction.cc',
'../../src/compiler/instruction.h',
+ '../../src/compiler/interpreter-assembler.cc',
+ '../../src/compiler/interpreter-assembler.h',
'../../src/compiler/js-builtin-reducer.cc',
'../../src/compiler/js-builtin-reducer.h',
+ '../../src/compiler/js-context-relaxation.cc',
+ '../../src/compiler/js-context-relaxation.h',
'../../src/compiler/js-context-specialization.cc',
'../../src/compiler/js-context-specialization.h',
'../../src/compiler/js-frame-specialization.cc',
@@ -519,11 +526,12 @@
'../../src/compiler/js-operator.h',
'../../src/compiler/js-type-feedback.cc',
'../../src/compiler/js-type-feedback.h',
+ '../../src/compiler/js-type-feedback-lowering.cc',
+ '../../src/compiler/js-type-feedback-lowering.h',
'../../src/compiler/js-typed-lowering.cc',
'../../src/compiler/js-typed-lowering.h',
'../../src/compiler/jump-threading.cc',
'../../src/compiler/jump-threading.h',
- '../../src/compiler/linkage-impl.h',
'../../src/compiler/linkage.cc',
'../../src/compiler/linkage.h',
'../../src/compiler/liveness-analyzer.cc',
@@ -565,6 +573,8 @@
'../../src/compiler/pipeline.h',
'../../src/compiler/pipeline-statistics.cc',
'../../src/compiler/pipeline-statistics.h',
+ '../../src/compiler/preprocess-live-ranges.cc',
+ '../../src/compiler/preprocess-live-ranges.h',
'../../src/compiler/raw-machine-assembler.cc',
'../../src/compiler/raw-machine-assembler.h',
'../../src/compiler/register-allocator.cc',
@@ -602,6 +612,8 @@
'../../src/compiler/zone-pool.h',
'../../src/compiler.cc',
'../../src/compiler.h',
+ '../../src/context-measure.cc',
+ '../../src/context-measure.h',
'../../src/contexts.cc',
'../../src/contexts.h',
'../../src/conversions-inl.h',
@@ -617,8 +629,16 @@
'../../src/dateparser-inl.h',
'../../src/dateparser.cc',
'../../src/dateparser.h',
- '../../src/debug.cc',
- '../../src/debug.h',
+ '../../src/debug/debug-evaluate.cc',
+ '../../src/debug/debug-evaluate.h',
+ '../../src/debug/debug-frames.cc',
+ '../../src/debug/debug-frames.h',
+ '../../src/debug/debug-scopes.cc',
+ '../../src/debug/debug-scopes.h',
+ '../../src/debug/debug.cc',
+ '../../src/debug/debug.h',
+ '../../src/debug/liveedit.cc',
+ '../../src/debug/liveedit.h',
'../../src/deoptimizer.cc',
'../../src/deoptimizer.h',
'../../src/disasm.h',
@@ -661,10 +681,12 @@
'../../src/frames-inl.h',
'../../src/frames.cc',
'../../src/frames.h',
- '../../src/full-codegen.cc',
- '../../src/full-codegen.h',
+ '../../src/full-codegen/full-codegen.cc',
+ '../../src/full-codegen/full-codegen.h',
'../../src/func-name-inferrer.cc',
'../../src/func-name-inferrer.h',
+ '../../src/futex-emulation.cc',
+ '../../src/futex-emulation.h',
'../../src/gdb-jit.cc',
'../../src/gdb-jit.h',
'../../src/global-handles.cc',
@@ -776,15 +798,18 @@
'../../src/ic/ic-compiler.h',
'../../src/interface-descriptors.cc',
'../../src/interface-descriptors.h',
- '../../src/interpreter-irregexp.cc',
- '../../src/interpreter-irregexp.h',
+ '../../src/interpreter/bytecodes.cc',
+ '../../src/interpreter/bytecodes.h',
+ '../../src/interpreter/bytecode-generator.cc',
+ '../../src/interpreter/bytecode-generator.h',
+ '../../src/interpreter/bytecode-array-builder.cc',
+ '../../src/interpreter/bytecode-array-builder.h',
+ '../../src/interpreter/interpreter.cc',
+ '../../src/interpreter/interpreter.h',
'../../src/isolate.cc',
'../../src/isolate.h',
'../../src/json-parser.h',
'../../src/json-stringifier.h',
- '../../src/jsregexp-inl.h',
- '../../src/jsregexp.cc',
- '../../src/jsregexp.h',
'../../src/layout-descriptor-inl.h',
'../../src/layout-descriptor.cc',
'../../src/layout-descriptor.h',
@@ -798,8 +823,6 @@
'../../src/lithium.cc',
'../../src/lithium.h',
'../../src/lithium-inl.h',
- '../../src/liveedit.cc',
- '../../src/liveedit.h',
'../../src/log-inl.h',
'../../src/log-utils.cc',
'../../src/log-utils.h',
@@ -842,15 +865,21 @@
'../../src/property.cc',
'../../src/property.h',
'../../src/prototype.h',
- '../../src/regexp-macro-assembler-irregexp-inl.h',
- '../../src/regexp-macro-assembler-irregexp.cc',
- '../../src/regexp-macro-assembler-irregexp.h',
- '../../src/regexp-macro-assembler-tracer.cc',
- '../../src/regexp-macro-assembler-tracer.h',
- '../../src/regexp-macro-assembler.cc',
- '../../src/regexp-macro-assembler.h',
- '../../src/regexp-stack.cc',
- '../../src/regexp-stack.h',
+ '../../src/regexp/bytecodes-irregexp.h',
+ '../../src/regexp/interpreter-irregexp.cc',
+ '../../src/regexp/interpreter-irregexp.h',
+ '../../src/regexp/jsregexp-inl.h',
+ '../../src/regexp/jsregexp.cc',
+ '../../src/regexp/jsregexp.h',
+ '../../src/regexp/regexp-macro-assembler-irregexp-inl.h',
+ '../../src/regexp/regexp-macro-assembler-irregexp.cc',
+ '../../src/regexp/regexp-macro-assembler-irregexp.h',
+ '../../src/regexp/regexp-macro-assembler-tracer.cc',
+ '../../src/regexp/regexp-macro-assembler-tracer.h',
+ '../../src/regexp/regexp-macro-assembler.cc',
+ '../../src/regexp/regexp-macro-assembler.h',
+ '../../src/regexp/regexp-stack.cc',
+ '../../src/regexp/regexp-stack.h',
'../../src/rewriter.cc',
'../../src/rewriter.h',
'../../src/runtime-profiler.cc',
@@ -864,6 +893,7 @@
'../../src/runtime/runtime-debug.cc',
'../../src/runtime/runtime-forin.cc',
'../../src/runtime/runtime-function.cc',
+ '../../src/runtime/runtime-futex.cc',
'../../src/runtime/runtime-generator.cc',
'../../src/runtime/runtime-i18n.cc',
'../../src/runtime/runtime-internal.cc',
@@ -877,6 +907,7 @@
'../../src/runtime/runtime-proxy.cc',
'../../src/runtime/runtime-regexp.cc',
'../../src/runtime/runtime-scopes.cc',
+ '../../src/runtime/runtime-simd.cc',
'../../src/runtime/runtime-strings.cc',
'../../src/runtime/runtime-symbol.cc',
'../../src/runtime/runtime-test.cc',
@@ -900,8 +931,8 @@
'../../src/signature.h',
'../../src/simulator.h',
'../../src/small-pointer-list.h',
- '../../src/smart-pointers.h',
'../../src/snapshot/natives.h',
+ '../../src/snapshot/natives-common.cc',
'../../src/snapshot/serialize.cc',
'../../src/snapshot/serialize.h',
'../../src/snapshot/snapshot.h',
@@ -914,7 +945,6 @@
'../../src/startup-data-util.h',
'../../src/string-builder.cc',
'../../src/string-builder.h',
- '../../src/string-search.cc',
'../../src/string-search.h',
'../../src/string-stream.cc',
'../../src/string-stream.h',
@@ -987,12 +1017,10 @@
'../../src/arm/constants-arm.h',
'../../src/arm/constants-arm.cc',
'../../src/arm/cpu-arm.cc',
- '../../src/arm/debug-arm.cc',
'../../src/arm/deoptimizer-arm.cc',
'../../src/arm/disasm-arm.cc',
'../../src/arm/frames-arm.cc',
'../../src/arm/frames-arm.h',
- '../../src/arm/full-codegen-arm.cc',
'../../src/arm/interface-descriptors-arm.cc',
'../../src/arm/interface-descriptors-arm.h',
'../../src/arm/lithium-arm.cc',
@@ -1003,19 +1031,20 @@
'../../src/arm/lithium-gap-resolver-arm.h',
'../../src/arm/macro-assembler-arm.cc',
'../../src/arm/macro-assembler-arm.h',
- '../../src/arm/regexp-macro-assembler-arm.cc',
- '../../src/arm/regexp-macro-assembler-arm.h',
'../../src/arm/simulator-arm.cc',
'../../src/arm/simulator-arm.h',
'../../src/compiler/arm/code-generator-arm.cc',
'../../src/compiler/arm/instruction-codes-arm.h',
'../../src/compiler/arm/instruction-selector-arm.cc',
- '../../src/compiler/arm/linkage-arm.cc',
+ '../../src/debug/arm/debug-arm.cc',
+ '../../src/full-codegen/arm/full-codegen-arm.cc',
'../../src/ic/arm/access-compiler-arm.cc',
'../../src/ic/arm/handler-compiler-arm.cc',
'../../src/ic/arm/ic-arm.cc',
'../../src/ic/arm/ic-compiler-arm.cc',
'../../src/ic/arm/stub-cache-arm.cc',
+ '../../src/regexp/arm/regexp-macro-assembler-arm.cc',
+ '../../src/regexp/arm/regexp-macro-assembler-arm.h',
],
}],
['v8_target_arch=="arm64"', {
@@ -1030,7 +1059,6 @@
'../../src/arm64/code-stubs-arm64.h',
'../../src/arm64/constants-arm64.h',
'../../src/arm64/cpu-arm64.cc',
- '../../src/arm64/debug-arm64.cc',
'../../src/arm64/decoder-arm64.cc',
'../../src/arm64/decoder-arm64.h',
'../../src/arm64/decoder-arm64-inl.h',
@@ -1042,7 +1070,6 @@
'../../src/arm64/disasm-arm64.h',
'../../src/arm64/frames-arm64.cc',
'../../src/arm64/frames-arm64.h',
- '../../src/arm64/full-codegen-arm64.cc',
'../../src/arm64/instructions-arm64.cc',
'../../src/arm64/instructions-arm64.h',
'../../src/arm64/instrument-arm64.cc',
@@ -1058,8 +1085,6 @@
'../../src/arm64/macro-assembler-arm64.cc',
'../../src/arm64/macro-assembler-arm64.h',
'../../src/arm64/macro-assembler-arm64-inl.h',
- '../../src/arm64/regexp-macro-assembler-arm64.cc',
- '../../src/arm64/regexp-macro-assembler-arm64.h',
'../../src/arm64/simulator-arm64.cc',
'../../src/arm64/simulator-arm64.h',
'../../src/arm64/utils-arm64.cc',
@@ -1067,12 +1092,15 @@
'../../src/compiler/arm64/code-generator-arm64.cc',
'../../src/compiler/arm64/instruction-codes-arm64.h',
'../../src/compiler/arm64/instruction-selector-arm64.cc',
- '../../src/compiler/arm64/linkage-arm64.cc',
+ '../../src/debug/arm64/debug-arm64.cc',
+ '../../src/full-codegen/arm64/full-codegen-arm64.cc',
'../../src/ic/arm64/access-compiler-arm64.cc',
'../../src/ic/arm64/handler-compiler-arm64.cc',
'../../src/ic/arm64/ic-arm64.cc',
'../../src/ic/arm64/ic-compiler-arm64.cc',
'../../src/ic/arm64/stub-cache-arm64.cc',
+ '../../src/regexp/arm64/regexp-macro-assembler-arm64.cc',
+ '../../src/regexp/arm64/regexp-macro-assembler-arm64.h',
],
}],
['v8_target_arch=="ia32"', {
@@ -1086,12 +1114,10 @@
'../../src/ia32/codegen-ia32.cc',
'../../src/ia32/codegen-ia32.h',
'../../src/ia32/cpu-ia32.cc',
- '../../src/ia32/debug-ia32.cc',
'../../src/ia32/deoptimizer-ia32.cc',
'../../src/ia32/disasm-ia32.cc',
'../../src/ia32/frames-ia32.cc',
'../../src/ia32/frames-ia32.h',
- '../../src/ia32/full-codegen-ia32.cc',
'../../src/ia32/interface-descriptors-ia32.cc',
'../../src/ia32/lithium-codegen-ia32.cc',
'../../src/ia32/lithium-codegen-ia32.h',
@@ -1101,17 +1127,18 @@
'../../src/ia32/lithium-ia32.h',
'../../src/ia32/macro-assembler-ia32.cc',
'../../src/ia32/macro-assembler-ia32.h',
- '../../src/ia32/regexp-macro-assembler-ia32.cc',
- '../../src/ia32/regexp-macro-assembler-ia32.h',
'../../src/compiler/ia32/code-generator-ia32.cc',
'../../src/compiler/ia32/instruction-codes-ia32.h',
'../../src/compiler/ia32/instruction-selector-ia32.cc',
- '../../src/compiler/ia32/linkage-ia32.cc',
+ '../../src/debug/ia32/debug-ia32.cc',
+ '../../src/full-codegen/ia32/full-codegen-ia32.cc',
'../../src/ic/ia32/access-compiler-ia32.cc',
'../../src/ic/ia32/handler-compiler-ia32.cc',
'../../src/ic/ia32/ic-ia32.cc',
'../../src/ic/ia32/ic-compiler-ia32.cc',
'../../src/ic/ia32/stub-cache-ia32.cc',
+ '../../src/regexp/ia32/regexp-macro-assembler-ia32.cc',
+ '../../src/regexp/ia32/regexp-macro-assembler-ia32.h',
],
}],
['v8_target_arch=="x87"', {
@@ -1125,12 +1152,10 @@
'../../src/x87/codegen-x87.cc',
'../../src/x87/codegen-x87.h',
'../../src/x87/cpu-x87.cc',
- '../../src/x87/debug-x87.cc',
'../../src/x87/deoptimizer-x87.cc',
'../../src/x87/disasm-x87.cc',
'../../src/x87/frames-x87.cc',
'../../src/x87/frames-x87.h',
- '../../src/x87/full-codegen-x87.cc',
'../../src/x87/interface-descriptors-x87.cc',
'../../src/x87/lithium-codegen-x87.cc',
'../../src/x87/lithium-codegen-x87.h',
@@ -1140,17 +1165,18 @@
'../../src/x87/lithium-x87.h',
'../../src/x87/macro-assembler-x87.cc',
'../../src/x87/macro-assembler-x87.h',
- '../../src/x87/regexp-macro-assembler-x87.cc',
- '../../src/x87/regexp-macro-assembler-x87.h',
'../../src/compiler/x87/code-generator-x87.cc',
'../../src/compiler/x87/instruction-codes-x87.h',
'../../src/compiler/x87/instruction-selector-x87.cc',
- '../../src/compiler/x87/linkage-x87.cc',
+ '../../src/debug/x87/debug-x87.cc',
+ '../../src/full-codegen/x87/full-codegen-x87.cc',
'../../src/ic/x87/access-compiler-x87.cc',
'../../src/ic/x87/handler-compiler-x87.cc',
'../../src/ic/x87/ic-x87.cc',
'../../src/ic/x87/ic-compiler-x87.cc',
'../../src/ic/x87/stub-cache-x87.cc',
+ '../../src/regexp/x87/regexp-macro-assembler-x87.cc',
+ '../../src/regexp/x87/regexp-macro-assembler-x87.h',
],
}],
['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
@@ -1166,12 +1192,10 @@
'../../src/mips/constants-mips.cc',
'../../src/mips/constants-mips.h',
'../../src/mips/cpu-mips.cc',
- '../../src/mips/debug-mips.cc',
'../../src/mips/deoptimizer-mips.cc',
'../../src/mips/disasm-mips.cc',
'../../src/mips/frames-mips.cc',
'../../src/mips/frames-mips.h',
- '../../src/mips/full-codegen-mips.cc',
'../../src/mips/interface-descriptors-mips.cc',
'../../src/mips/lithium-codegen-mips.cc',
'../../src/mips/lithium-codegen-mips.h',
@@ -1181,19 +1205,20 @@
'../../src/mips/lithium-mips.h',
'../../src/mips/macro-assembler-mips.cc',
'../../src/mips/macro-assembler-mips.h',
- '../../src/mips/regexp-macro-assembler-mips.cc',
- '../../src/mips/regexp-macro-assembler-mips.h',
'../../src/mips/simulator-mips.cc',
'../../src/mips/simulator-mips.h',
'../../src/compiler/mips/code-generator-mips.cc',
'../../src/compiler/mips/instruction-codes-mips.h',
'../../src/compiler/mips/instruction-selector-mips.cc',
- '../../src/compiler/mips/linkage-mips.cc',
+ '../../src/full-codegen/mips/full-codegen-mips.cc',
+ '../../src/debug/mips/debug-mips.cc',
'../../src/ic/mips/access-compiler-mips.cc',
'../../src/ic/mips/handler-compiler-mips.cc',
'../../src/ic/mips/ic-mips.cc',
'../../src/ic/mips/ic-compiler-mips.cc',
'../../src/ic/mips/stub-cache-mips.cc',
+ '../../src/regexp/mips/regexp-macro-assembler-mips.cc',
+ '../../src/regexp/mips/regexp-macro-assembler-mips.h',
],
}],
['v8_target_arch=="mips64el"', {
@@ -1209,12 +1234,10 @@
'../../src/mips64/constants-mips64.cc',
'../../src/mips64/constants-mips64.h',
'../../src/mips64/cpu-mips64.cc',
- '../../src/mips64/debug-mips64.cc',
'../../src/mips64/deoptimizer-mips64.cc',
'../../src/mips64/disasm-mips64.cc',
'../../src/mips64/frames-mips64.cc',
'../../src/mips64/frames-mips64.h',
- '../../src/mips64/full-codegen-mips64.cc',
'../../src/mips64/interface-descriptors-mips64.cc',
'../../src/mips64/lithium-codegen-mips64.cc',
'../../src/mips64/lithium-codegen-mips64.h',
@@ -1224,19 +1247,20 @@
'../../src/mips64/lithium-mips64.h',
'../../src/mips64/macro-assembler-mips64.cc',
'../../src/mips64/macro-assembler-mips64.h',
- '../../src/mips64/regexp-macro-assembler-mips64.cc',
- '../../src/mips64/regexp-macro-assembler-mips64.h',
'../../src/mips64/simulator-mips64.cc',
'../../src/mips64/simulator-mips64.h',
'../../src/compiler/mips64/code-generator-mips64.cc',
'../../src/compiler/mips64/instruction-codes-mips64.h',
'../../src/compiler/mips64/instruction-selector-mips64.cc',
- '../../src/compiler/mips64/linkage-mips64.cc',
+ '../../src/debug/mips64/debug-mips64.cc',
+ '../../src/full-codegen/mips64/full-codegen-mips64.cc',
'../../src/ic/mips64/access-compiler-mips64.cc',
'../../src/ic/mips64/handler-compiler-mips64.cc',
'../../src/ic/mips64/ic-mips64.cc',
'../../src/ic/mips64/ic-compiler-mips64.cc',
'../../src/ic/mips64/stub-cache-mips64.cc',
+ '../../src/regexp/mips64/regexp-macro-assembler-mips64.cc',
+ '../../src/regexp/mips64/regexp-macro-assembler-mips64.h',
],
}],
['v8_target_arch=="x64" or v8_target_arch=="x32"', {
@@ -1250,12 +1274,10 @@
'../../src/x64/codegen-x64.cc',
'../../src/x64/codegen-x64.h',
'../../src/x64/cpu-x64.cc',
- '../../src/x64/debug-x64.cc',
'../../src/x64/deoptimizer-x64.cc',
'../../src/x64/disasm-x64.cc',
'../../src/x64/frames-x64.cc',
'../../src/x64/frames-x64.h',
- '../../src/x64/full-codegen-x64.cc',
'../../src/x64/interface-descriptors-x64.cc',
'../../src/x64/lithium-codegen-x64.cc',
'../../src/x64/lithium-codegen-x64.h',
@@ -1265,13 +1287,15 @@
'../../src/x64/lithium-x64.h',
'../../src/x64/macro-assembler-x64.cc',
'../../src/x64/macro-assembler-x64.h',
- '../../src/x64/regexp-macro-assembler-x64.cc',
- '../../src/x64/regexp-macro-assembler-x64.h',
+ '../../src/debug/x64/debug-x64.cc',
+ '../../src/full-codegen/x64/full-codegen-x64.cc',
'../../src/ic/x64/access-compiler-x64.cc',
'../../src/ic/x64/handler-compiler-x64.cc',
'../../src/ic/x64/ic-x64.cc',
'../../src/ic/x64/ic-compiler-x64.cc',
'../../src/ic/x64/stub-cache-x64.cc',
+ '../../src/regexp/x64/regexp-macro-assembler-x64.cc',
+ '../../src/regexp/x64/regexp-macro-assembler-x64.h',
],
}],
['v8_target_arch=="x64"', {
@@ -1279,7 +1303,6 @@
'../../src/compiler/x64/code-generator-x64.cc',
'../../src/compiler/x64/instruction-codes-x64.h',
'../../src/compiler/x64/instruction-selector-x64.cc',
- '../../src/compiler/x64/linkage-x64.cc',
],
}],
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
@@ -1295,12 +1318,10 @@
'../../src/ppc/constants-ppc.h',
'../../src/ppc/constants-ppc.cc',
'../../src/ppc/cpu-ppc.cc',
- '../../src/ppc/debug-ppc.cc',
'../../src/ppc/deoptimizer-ppc.cc',
'../../src/ppc/disasm-ppc.cc',
'../../src/ppc/frames-ppc.cc',
'../../src/ppc/frames-ppc.h',
- '../../src/ppc/full-codegen-ppc.cc',
'../../src/ppc/interface-descriptors-ppc.cc',
'../../src/ppc/interface-descriptors-ppc.h',
'../../src/ppc/lithium-ppc.cc',
@@ -1311,19 +1332,20 @@
'../../src/ppc/lithium-gap-resolver-ppc.h',
'../../src/ppc/macro-assembler-ppc.cc',
'../../src/ppc/macro-assembler-ppc.h',
- '../../src/ppc/regexp-macro-assembler-ppc.cc',
- '../../src/ppc/regexp-macro-assembler-ppc.h',
'../../src/ppc/simulator-ppc.cc',
'../../src/ppc/simulator-ppc.h',
'../../src/compiler/ppc/code-generator-ppc.cc',
'../../src/compiler/ppc/instruction-codes-ppc.h',
'../../src/compiler/ppc/instruction-selector-ppc.cc',
- '../../src/compiler/ppc/linkage-ppc.cc',
+ '../../src/debug/ppc/debug-ppc.cc',
+ '../../src/full-codegen/ppc/full-codegen-ppc.cc',
'../../src/ic/ppc/access-compiler-ppc.cc',
'../../src/ic/ppc/handler-compiler-ppc.cc',
'../../src/ic/ppc/ic-ppc.cc',
'../../src/ic/ppc/ic-compiler-ppc.cc',
'../../src/ic/ppc/stub-cache-ppc.cc',
+ '../../src/regexp/ppc/regexp-macro-assembler-ppc.cc',
+ '../../src/regexp/ppc/regexp-macro-assembler-ppc.h',
],
}],
['OS=="win"', {
@@ -1374,6 +1396,9 @@
}],
],
}],
+ ['v8_wasm!=0', {
+ 'dependencies': ['../../third_party/wasm/src/wasm/wasm.gyp:wasm'],
+ }],
],
},
{
@@ -1432,6 +1457,7 @@
'../../src/base/safe_conversions_impl.h',
'../../src/base/safe_math.h',
'../../src/base/safe_math_impl.h',
+ '../../src/base/smart-pointers.h',
'../../src/base/sys-info.cc',
'../../src/base/sys-info.h',
'../../src/base/utils/random-number-generator.cc',
@@ -1685,6 +1711,7 @@
'inputs': [
'../../tools/concatenate-files.py',
'<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries-code-stub.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
],
@@ -1750,8 +1777,8 @@
'library_files': [
'../../src/macros.py',
'../../src/messages.h',
- '../../src/runtime.js',
'../../src/prologue.js',
+ '../../src/runtime.js',
'../../src/v8natives.js',
'../../src/symbol.js',
'../../src/array.js',
@@ -1774,12 +1801,12 @@
'../../src/json.js',
'../../src/array-iterator.js',
'../../src/string-iterator.js',
- '../../src/debug-debugger.js',
- '../../src/mirror-debugger.js',
- '../../src/liveedit-debugger.js',
'../../src/templates.js',
'../../src/harmony-array.js',
'../../src/harmony-typedarray.js',
+ '../../src/debug/mirrors.js',
+ '../../src/debug/debug.js',
+ '../../src/debug/liveedit.js',
],
'experimental_library_files': [
'../../src/macros.py',
@@ -1794,9 +1821,17 @@
'../../src/harmony-reflect.js',
'../../src/harmony-spread.js',
'../../src/harmony-object.js',
+ '../../src/harmony-object-observe.js',
'../../src/harmony-sharedarraybuffer.js',
+ '../../src/harmony-simd.js',
+ ],
+ 'code_stub_library_files': [
+ '../../src/macros.py',
+ '../../src/messages.h',
+ '../../src/code-stubs.js',
],
'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
+ 'libraries_code_stub_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-code-stub.bin',
'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
'libraries_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
},
@@ -1854,6 +1889,31 @@
],
},
{
+ 'action_name': 'js2c_code_stubs',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(code_stub_library_files)',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
+ ],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
+ 'CODE_STUB',
+ '<@(code_stub_library_files)'
+ ],
+ 'conditions': [
+ [ 'v8_use_external_startup_data==1', {
+ 'outputs': ['<@(libraries_code_stub_bin_file)'],
+ 'action': [
+ '--startup_blob', '<@(libraries_code_stub_bin_file)'
+ ],
+ }],
+ ],
+ },
+ {
'action_name': 'js2c_extras',
'inputs': [
'../../tools/js2c.py',
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index b5436f90bb..c280537379 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -196,7 +196,7 @@ def ReadMacros(lines):
return (constants, macros)
-TEMPLATE_PATTERN = re.compile(r'^\s+T\(([A-Z][a-zA-Z]*),')
+TEMPLATE_PATTERN = re.compile(r'^\s+T\(([A-Z][a-zA-Z0-9]*),')
def ReadMessageTemplates(lines):
templates = []
@@ -377,7 +377,7 @@ class Sources:
def IsDebuggerFile(filename):
- return filename.endswith("-debugger.js")
+ return "debug" in filename
def IsMacroFile(filename):
return filename.endswith("macros.py")
@@ -447,7 +447,7 @@ def PrepareSources(source_files, native_type, emit_js):
result.is_debugger_id.append(is_debugger)
name = os.path.basename(source)[:-3]
- result.names.append(name if not is_debugger else name[:-9])
+ result.names.append(name)
return result
diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py
index 4e361ae9c8..7fbf402d95 100755
--- a/deps/v8/tools/run-deopt-fuzzer.py
+++ b/deps/v8/tools/run-deopt-fuzzer.py
@@ -417,7 +417,7 @@ def Execute(arch, mode, args, options, suites, workspace):
test_backup[s] = s.tests
analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
"--print-deopt-stress"]
- s.tests = [ t.CopyAddingFlags(analysis_flags) for t in s.tests ]
+ s.tests = [ t.CopyAddingFlags(t.variant, analysis_flags) for t in s.tests ]
num_tests += len(s.tests)
for t in s.tests:
t.id = test_id
@@ -464,7 +464,7 @@ def Execute(arch, mode, args, options, suites, workspace):
print "%s %s" % (t.path, distribution)
for i in distribution:
fuzzing_flags = ["--deopt-every-n-times", "%d" % i]
- s.tests.append(t.CopyAddingFlags(fuzzing_flags))
+ s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
num_tests += len(s.tests)
for t in s.tests:
t.id = test_id
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index 76476271f1..9c6f30a365 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -44,7 +44,7 @@ import time
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
-from testrunner.local.testsuite import VARIANT_FLAGS
+from testrunner.local.testsuite import ALL_VARIANTS
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
@@ -322,8 +322,17 @@ def RandomSeed():
return seed
+def BuildbotToV8Mode(config):
+ """Convert buildbot build configs to configs understood by the v8 runner.
+
+ V8 configs are always lower case and without the additional _x64 suffix for
+ 64 bit builds on windows with ninja.
+ """
+ mode = config[:-4] if config.endswith('_x64') else config
+ return mode.lower()
+
def ProcessOptions(options):
- global VARIANT_FLAGS
+ global ALL_VARIANTS
global VARIANTS
# Architecture and mode related stuff.
@@ -334,7 +343,7 @@ def ProcessOptions(options):
options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
options.mode = options.mode.split(",")
for mode in options.mode:
- if not mode.lower() in MODES:
+ if not BuildbotToV8Mode(mode) in MODES:
print "Unknown mode %s" % mode
return False
if options.arch in ["auto", "native"]:
@@ -414,8 +423,8 @@ def ProcessOptions(options):
VARIANTS = ["stress"]
if options.variants:
VARIANTS = options.variants.split(",")
- if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
- print "All variants must be in %s" % str(VARIANT_FLAGS.keys())
+ if not set(VARIANTS).issubset(ALL_VARIANTS):
+ print "All variants must be in %s" % str(ALL_VARIANTS)
return False
if options.predictable:
VARIANTS = ["default"]
@@ -496,7 +505,7 @@ def Main():
else:
args_suites = OrderedDict() # Used as set
for arg in args:
- args_suites[arg.split(os.path.sep)[0]] = True
+ args_suites[arg.split('/')[0]] = True
suite_paths = [ s for s in args_suites if s in suite_paths ]
suites = []
@@ -531,7 +540,7 @@ def Execute(arch, mode, args, options, suites, workspace):
# TODO(machenbach): Get rid of different output folder location on
# buildbot. Currently this is capitalized Release and Debug.
shell_dir = os.path.join(workspace, options.outdir, mode)
- mode = mode.lower()
+ mode = BuildbotToV8Mode(mode)
else:
shell_dir = os.path.join(
workspace,
@@ -612,10 +621,11 @@ def Execute(arch, mode, args, options, suites, workspace):
if options.cat:
verbose.PrintTestSource(s.tests)
continue
- variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
- variant_tests = [ t.CopyAddingFlags(v)
+ variant_gen = s.CreateVariantGenerator(VARIANTS)
+ variant_tests = [ t.CopyAddingFlags(v, flags)
for t in s.tests
- for v in s.VariantFlags(t, variant_flags) ]
+ for v in variant_gen.FilterVariantsByTest(t)
+ for flags in variant_gen.GetFlagSets(t, v) ]
if options.random_seed_stress_count > 1:
# Duplicate test for random seed stress mode.
@@ -628,9 +638,9 @@ def Execute(arch, mode, args, options, suites, workspace):
else:
yield ["--random-seed=%d" % RandomSeed()]
s.tests = [
- t.CopyAddingFlags(v)
+ t.CopyAddingFlags(t.variant, flags)
for t in variant_tests
- for v in iter_seed_flags()
+ for flags in iter_seed_flags()
]
else:
s.tests = variant_tests
@@ -653,11 +663,13 @@ def Execute(arch, mode, args, options, suites, workspace):
options.junitout, options.junittestsuite))
if options.json_test_results:
progress_indicator.Register(progress.JsonTestProgressIndicator(
- options.json_test_results, arch, MODES[mode]["execution_mode"]))
+ options.json_test_results, arch, MODES[mode]["execution_mode"],
+ ctx.random_seed))
run_networked = not options.no_network
if not run_networked:
- print("Network distribution disabled, running tests locally.")
+ if verbose_output:
+ print("Network distribution disabled, running tests locally.")
elif utils.GuessOS() != "linux":
print("Network distribution is only supported on Linux, sorry!")
run_networked = False
@@ -685,6 +697,10 @@ def Execute(arch, mode, args, options, suites, workspace):
if options.time:
verbose.PrintTestDurations(suites, overall_duration)
+
+ if num_tests == 0:
+ print("Warning: no tests were run!")
+
return exit_code
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index c04e4e77c3..31331686fa 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -126,16 +126,16 @@ def LoadAndroidBuildTools(path): # pragma: no cover
assert os.path.exists(path)
sys.path.insert(0, path)
- from pylib.device import device_utils # pylint: disable=F0401
+ from pylib.device import adb_wrapper # pylint: disable=F0401
from pylib.device import device_errors # pylint: disable=F0401
+ from pylib.device import device_utils # pylint: disable=F0401
from pylib.perf import cache_control # pylint: disable=F0401
from pylib.perf import perf_control # pylint: disable=F0401
- import pylib.android_commands # pylint: disable=F0401
+ global adb_wrapper
global cache_control
global device_errors
global device_utils
global perf_control
- global pylib
def GeometricMean(values):
@@ -169,6 +169,174 @@ class Results(object):
return str(self.ToDict())
+class Measurement(object):
+ """Represents a series of results of one trace.
+
+ The results are from repetitive runs of the same executable. They are
+ gathered by repeated calls to ConsumeOutput.
+ """
+ def __init__(self, graphs, units, results_regexp, stddev_regexp):
+ self.name = graphs[-1]
+ self.graphs = graphs
+ self.units = units
+ self.results_regexp = results_regexp
+ self.stddev_regexp = stddev_regexp
+ self.results = []
+ self.errors = []
+ self.stddev = ""
+
+ def ConsumeOutput(self, stdout):
+ try:
+ result = re.search(self.results_regexp, stdout, re.M).group(1)
+ self.results.append(str(float(result)))
+ except ValueError:
+ self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
+ % (self.results_regexp, self.name))
+ except:
+ self.errors.append("Regexp \"%s\" didn't match for test %s."
+ % (self.results_regexp, self.name))
+
+ try:
+ if self.stddev_regexp and self.stddev:
+ self.errors.append("Test %s should only run once since a stddev "
+ "is provided by the test." % self.name)
+ if self.stddev_regexp:
+ self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
+ except:
+ self.errors.append("Regexp \"%s\" didn't match for test %s."
+ % (self.stddev_regexp, self.name))
+
+ def GetResults(self):
+ return Results([{
+ "graphs": self.graphs,
+ "units": self.units,
+ "results": self.results,
+ "stddev": self.stddev,
+ }], self.errors)
+
+
+class NullMeasurement(object):
+ """Null object to avoid having extra logic for configurations that didn't
+ run like running without patch on trybots.
+ """
+ def ConsumeOutput(self, stdout):
+ pass
+
+ def GetResults(self):
+ return Results()
+
+
+def Unzip(iterable):
+ left = []
+ right = []
+ for l, r in iterable:
+ left.append(l)
+ right.append(r)
+ return lambda: iter(left), lambda: iter(right)
+
+
+def AccumulateResults(
+ graph_names, trace_configs, iter_output, trybot, no_patch, calc_total):
+ """Iterates over the output of multiple benchmark reruns and accumulates
+ results for a configured list of traces.
+
+ Args:
+ graph_names: List of names that configure the base path of the traces. E.g.
+ ['v8', 'Octane'].
+ trace_configs: List of "TraceConfig" instances. Each trace config defines
+ how to perform a measurement.
+ iter_output: Iterator over the standard output of each test run.
+ trybot: Indicates that this is run in trybot mode, i.e. run twice, once
+ with and once without patch.
+ no_patch: Indicates whether this is a trybot run without patch.
+ calc_total: Boolean flag to specify the calculation of a summary trace.
+ Returns: A "Results" object.
+ """
+ measurements = [
+ trace.CreateMeasurement(trybot, no_patch) for trace in trace_configs]
+ for stdout in iter_output():
+ for measurement in measurements:
+ measurement.ConsumeOutput(stdout)
+
+ res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())
+
+ if not res.traces or not calc_total:
+ return res
+
+ # Assume all traces have the same structure.
+ if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
+ res.errors.append("Not all traces have the same number of results.")
+ return res
+
+ # Calculate the geometric means for all traces. Above we made sure that
+ # there is at least one trace and that the number of results is the same
+ # for each trace.
+ n_results = len(res.traces[0]["results"])
+ total_results = [GeometricMean(t["results"][i] for t in res.traces)
+ for i in range(0, n_results)]
+ res.traces.append({
+ "graphs": graph_names + ["Total"],
+ "units": res.traces[0]["units"],
+ "results": total_results,
+ "stddev": "",
+ })
+ return res
+
+
+def AccumulateGenericResults(graph_names, suite_units, iter_output):
+ """Iterates over the output of multiple benchmark reruns and accumulates
+ generic results.
+
+ Args:
+ graph_names: List of names that configure the base path of the traces. E.g.
+ ['v8', 'Octane'].
+ suite_units: Measurement default units as defined by the benchmark suite.
+ iter_output: Iterator over the standard output of each test run.
+ Returns: A "Results" object.
+ """
+ traces = OrderedDict()
+ for stdout in iter_output():
+ if stdout is None:
+ # The None value is used as a null object to simplify logic.
+ continue
+ for line in stdout.strip().splitlines():
+ match = GENERIC_RESULTS_RE.match(line)
+ if match:
+ stddev = ""
+ graph = match.group(1)
+ trace = match.group(2)
+ body = match.group(3)
+ units = match.group(4)
+ match_stddev = RESULT_STDDEV_RE.match(body)
+ match_list = RESULT_LIST_RE.match(body)
+ errors = []
+ if match_stddev:
+ result, stddev = map(str.strip, match_stddev.group(1).split(","))
+ results = [result]
+ elif match_list:
+ results = map(str.strip, match_list.group(1).split(","))
+ else:
+ results = [body.strip()]
+
+ try:
+ results = map(lambda r: str(float(r)), results)
+ except ValueError:
+ results = []
+ errors = ["Found non-numeric in %s" %
+ "/".join(graph_names + [graph, trace])]
+
+ trace_result = traces.setdefault(trace, Results([{
+ "graphs": graph_names + [graph, trace],
+ "units": (units or suite_units).strip(),
+ "results": [],
+ "stddev": "",
+ }], errors))
+ trace_result.traces[0]["results"].extend(results)
+ trace_result.traces[0]["stddev"] = stddev
+
+ return reduce(lambda r, t: r + t, traces.itervalues(), Results())
+
+
class Node(object):
"""Represents a node in the suite tree structure."""
def __init__(self, *args):
@@ -196,13 +364,13 @@ class DefaultSentinel(Node):
self.total = False
-class Graph(Node):
+class GraphConfig(Node):
"""Represents a suite definition.
Can either be a leaf or an inner node that provides default values.
"""
def __init__(self, suite, parent, arch):
- super(Graph, self).__init__()
+ super(GraphConfig, self).__init__()
self._suite = suite
assert isinstance(suite.get("path", []), list)
@@ -248,49 +416,26 @@ class Graph(Node):
self.stddev_regexp = suite.get("stddev_regexp", stddev_default)
-class Trace(Graph):
- """Represents a leaf in the suite tree structure.
-
- Handles collection of measurements.
- """
+class TraceConfig(GraphConfig):
+ """Represents a leaf in the suite tree structure."""
def __init__(self, suite, parent, arch):
- super(Trace, self).__init__(suite, parent, arch)
+ super(TraceConfig, self).__init__(suite, parent, arch)
assert self.results_regexp
- self.results = []
- self.errors = []
- self.stddev = ""
-
- def ConsumeOutput(self, stdout):
- try:
- result = re.search(self.results_regexp, stdout, re.M).group(1)
- self.results.append(str(float(result)))
- except ValueError:
- self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
- % (self.results_regexp, self.graphs[-1]))
- except:
- self.errors.append("Regexp \"%s\" didn't match for test %s."
- % (self.results_regexp, self.graphs[-1]))
- try:
- if self.stddev_regexp and self.stddev:
- self.errors.append("Test %s should only run once since a stddev "
- "is provided by the test." % self.graphs[-1])
- if self.stddev_regexp:
- self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
- except:
- self.errors.append("Regexp \"%s\" didn't match for test %s."
- % (self.stddev_regexp, self.graphs[-1]))
+ def CreateMeasurement(self, trybot, no_patch):
+ if not trybot and no_patch:
+ # Use null object for no-patch logic if this is not a trybot run.
+ return NullMeasurement()
- def GetResults(self):
- return Results([{
- "graphs": self.graphs,
- "units": self.units,
- "results": self.results,
- "stddev": self.stddev,
- }], self.errors)
+ return Measurement(
+ self.graphs,
+ self.units,
+ self.results_regexp,
+ self.stddev_regexp,
+ )
-class Runnable(Graph):
+class RunnableConfig(GraphConfig):
"""Represents a runnable suite definition (i.e. has a main file).
"""
@property
@@ -315,119 +460,85 @@ class Runnable(Graph):
cmd = [os.path.join(shell_dir, self.binary)]
return cmd + self.GetCommandFlags(extra_flags=extra_flags)
- def Run(self, runner):
+ def Run(self, runner, trybot):
"""Iterates over several runs and handles the output for all traces."""
- for stdout in runner():
- for trace in self._children:
- trace.ConsumeOutput(stdout)
- res = reduce(lambda r, t: r + t.GetResults(), self._children, Results())
-
- if not res.traces or not self.total:
- return res
-
- # Assume all traces have the same structure.
- if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
- res.errors.append("Not all traces have the same number of results.")
- return res
-
- # Calculate the geometric means for all traces. Above we made sure that
- # there is at least one trace and that the number of results is the same
- # for each trace.
- n_results = len(res.traces[0]["results"])
- total_results = [GeometricMean(t["results"][i] for t in res.traces)
- for i in range(0, n_results)]
- res.traces.append({
- "graphs": self.graphs + ["Total"],
- "units": res.traces[0]["units"],
- "results": total_results,
- "stddev": "",
- })
- return res
+ stdout_with_patch, stdout_no_patch = Unzip(runner())
+ return (
+ AccumulateResults(
+ self.graphs,
+ self._children,
+ iter_output=stdout_with_patch,
+ trybot=trybot,
+ no_patch=False,
+ calc_total=self.total,
+ ),
+ AccumulateResults(
+ self.graphs,
+ self._children,
+ iter_output=stdout_no_patch,
+ trybot=trybot,
+ no_patch=True,
+ calc_total=self.total,
+ ),
+ )
-class RunnableTrace(Trace, Runnable):
+
+class RunnableTraceConfig(TraceConfig, RunnableConfig):
"""Represents a runnable suite definition that is a leaf."""
def __init__(self, suite, parent, arch):
- super(RunnableTrace, self).__init__(suite, parent, arch)
+ super(RunnableTraceConfig, self).__init__(suite, parent, arch)
- def Run(self, runner):
+ def Run(self, runner, trybot):
"""Iterates over several runs and handles the output."""
- for stdout in runner():
- self.ConsumeOutput(stdout)
- return self.GetResults()
+ measurement_with_patch = self.CreateMeasurement(trybot, False)
+ measurement_no_patch = self.CreateMeasurement(trybot, True)
+ for stdout_with_patch, stdout_no_patch in runner():
+ measurement_with_patch.ConsumeOutput(stdout_with_patch)
+ measurement_no_patch.ConsumeOutput(stdout_no_patch)
+ return (
+ measurement_with_patch.GetResults(),
+ measurement_no_patch.GetResults(),
+ )
-class RunnableGeneric(Runnable):
+class RunnableGenericConfig(RunnableConfig):
"""Represents a runnable suite definition with generic traces."""
def __init__(self, suite, parent, arch):
- super(RunnableGeneric, self).__init__(suite, parent, arch)
+ super(RunnableGenericConfig, self).__init__(suite, parent, arch)
- def Run(self, runner):
- """Iterates over several runs and handles the output."""
- traces = OrderedDict()
- for stdout in runner():
- for line in stdout.strip().splitlines():
- match = GENERIC_RESULTS_RE.match(line)
- if match:
- stddev = ""
- graph = match.group(1)
- trace = match.group(2)
- body = match.group(3)
- units = match.group(4)
- match_stddev = RESULT_STDDEV_RE.match(body)
- match_list = RESULT_LIST_RE.match(body)
- errors = []
- if match_stddev:
- result, stddev = map(str.strip, match_stddev.group(1).split(","))
- results = [result]
- elif match_list:
- results = map(str.strip, match_list.group(1).split(","))
- else:
- results = [body.strip()]
-
- try:
- results = map(lambda r: str(float(r)), results)
- except ValueError:
- results = []
- errors = ["Found non-numeric in %s" %
- "/".join(self.graphs + [graph, trace])]
-
- trace_result = traces.setdefault(trace, Results([{
- "graphs": self.graphs + [graph, trace],
- "units": (units or self.units).strip(),
- "results": [],
- "stddev": "",
- }], errors))
- trace_result.traces[0]["results"].extend(results)
- trace_result.traces[0]["stddev"] = stddev
-
- return reduce(lambda r, t: r + t, traces.itervalues(), Results())
-
-
-def MakeGraph(suite, arch, parent):
- """Factory method for making graph objects."""
- if isinstance(parent, Runnable):
+ def Run(self, runner, trybot):
+ stdout_with_patch, stdout_no_patch = Unzip(runner())
+ return (
+ AccumulateGenericResults(self.graphs, self.units, stdout_with_patch),
+ AccumulateGenericResults(self.graphs, self.units, stdout_no_patch),
+ )
+
+
+def MakeGraphConfig(suite, arch, parent):
+ """Factory method for making graph configuration objects."""
+ if isinstance(parent, RunnableConfig):
# Below a runnable can only be traces.
- return Trace(suite, parent, arch)
+ return TraceConfig(suite, parent, arch)
elif suite.get("main") is not None:
# A main file makes this graph runnable. Empty strings are accepted.
if suite.get("tests"):
# This graph has subgraphs (traces).
- return Runnable(suite, parent, arch)
+ return RunnableConfig(suite, parent, arch)
else:
# This graph has no subgraphs, it's a leaf.
- return RunnableTrace(suite, parent, arch)
+ return RunnableTraceConfig(suite, parent, arch)
elif suite.get("generic"):
# This is a generic suite definition. It is either a runnable executable
# or has a main js file.
- return RunnableGeneric(suite, parent, arch)
+ return RunnableGenericConfig(suite, parent, arch)
elif suite.get("tests"):
# This is neither a leaf nor a runnable.
- return Graph(suite, parent, arch)
+ return GraphConfig(suite, parent, arch)
else: # pragma: no cover
raise Exception("Invalid suite configuration.")
-def BuildGraphs(suite, arch, parent=None):
+def BuildGraphConfigs(suite, arch, parent=None):
"""Builds a tree structure of graph objects that corresponds to the suite
configuration.
"""
@@ -437,9 +548,9 @@ def BuildGraphs(suite, arch, parent=None):
if arch not in suite.get("archs", SUPPORTED_ARCHS):
return None
- graph = MakeGraph(suite, arch, parent)
+ graph = MakeGraphConfig(suite, arch, parent)
for subsuite in suite.get("tests", []):
- BuildGraphs(subsuite, arch, graph)
+ BuildGraphConfigs(subsuite, arch, graph)
parent.AppendChild(graph)
return graph
@@ -449,7 +560,7 @@ def FlattenRunnables(node, node_cb):
runnables.
"""
node_cb(node)
- if isinstance(node, Runnable):
+ if isinstance(node, RunnableConfig):
yield node
elif isinstance(node, Node):
for child in node._children:
@@ -462,6 +573,7 @@ def FlattenRunnables(node, node_cb):
class Platform(object):
def __init__(self, options):
self.shell_dir = options.shell_dir
+ self.shell_dir_no_patch = options.shell_dir_no_patch
self.extra_flags = options.extra_flags.split()
@staticmethod
@@ -471,6 +583,27 @@ class Platform(object):
else:
return DesktopPlatform(options)
+ def _Run(self, runnable, count, no_patch=False):
+ raise NotImplementedError() # pragma: no cover
+
+ def Run(self, runnable, count):
+ """Execute the benchmark's main file.
+
+ If options.shell_dir_no_patch is specified, the benchmark is run once with
+ and once without patch.
+ Args:
+ runnable: A Runnable benchmark instance.
+ count: The number of this (repeated) run.
+ Returns: A tuple with the benchmark outputs with and without patch. The
+ latter will be None if options.shell_dir_no_patch was not
+ specified.
+ """
+ stdout = self._Run(runnable, count, no_patch=False)
+ if self.shell_dir_no_patch:
+ return stdout, self._Run(runnable, count, no_patch=True)
+ else:
+ return stdout, None
+
class DesktopPlatform(Platform):
def __init__(self, options):
@@ -483,24 +616,27 @@ class DesktopPlatform(Platform):
pass
def PreTests(self, node, path):
- if isinstance(node, Runnable):
+ if isinstance(node, RunnableConfig):
node.ChangeCWD(path)
- def Run(self, runnable, count):
+ def _Run(self, runnable, count, no_patch=False):
+ suffix = ' - without patch' if no_patch else ''
+ shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir
+ title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
try:
output = commands.Execute(
- runnable.GetCommand(self.shell_dir, self.extra_flags),
+ runnable.GetCommand(shell_dir, self.extra_flags),
timeout=runnable.timeout,
)
- except OSError as e:
- print ">>> OSError (#%d):" % (count + 1)
+ except OSError as e: # pragma: no cover
+ print title % "OSError"
print e
return ""
- print ">>> Stdout (#%d):" % (count + 1)
+ print title % "Stdout"
print output.stdout
if output.stderr: # pragma: no cover
# Print stderr for debugging.
- print ">>> Stderr (#%d):" % (count + 1)
+ print title % "Stderr"
print output.stderr
if output.timed_out:
print ">>> Test timed out after %ss." % runnable.timeout
@@ -516,15 +652,13 @@ class AndroidPlatform(Platform): # pragma: no cover
if not options.device:
# Detect attached device if not specified.
- devices = pylib.android_commands.GetAttachedDevices(
- hardware=True, emulator=False, offline=False)
+ devices = adb_wrapper.AdbWrapper.Devices()
assert devices and len(devices) == 1, (
"None or multiple devices detected. Please specify the device on "
"the command-line with --device")
- options.device = devices[0]
- adb_wrapper = pylib.android_commands.AndroidCommands(options.device)
- self.device = device_utils.DeviceUtils(adb_wrapper)
- self.adb = adb_wrapper.Adb()
+ options.device = str(devices[0])
+ self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
+ self.device = device_utils.DeviceUtils(self.adb_wrapper)
def PreExecution(self):
perf = perf_control.PerfControl(self.device)
@@ -538,10 +672,6 @@ class AndroidPlatform(Platform): # pragma: no cover
perf.SetDefaultPerfMode()
self.device.RunShellCommand(["rm", "-rf", AndroidPlatform.DEVICE_DIR])
- def _SendCommand(self, cmd):
- logging.info("adb -s %s %s" % (str(self.device), cmd))
- return self.adb.SendCommand(cmd, timeout_time=60)
-
def _PushFile(self, host_dir, file_name, target_rel=".",
skip_if_missing=False):
file_on_host = os.path.join(host_dir, file_name)
@@ -565,51 +695,59 @@ class AndroidPlatform(Platform): # pragma: no cover
# Work-around for "text file busy" errors. Push the files to a temporary
# location and then copy them with a shell command.
- output = self._SendCommand(
- "push %s %s" % (file_on_host, file_on_device_tmp))
+ output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
# Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
# Errors look like this: "failed to copy ... ".
if output and not re.search('^[0-9]', output.splitlines()[-1]):
logging.critical('PUSH FAILED: ' + output)
- self._SendCommand("shell mkdir -p %s" % folder_on_device)
- self._SendCommand("shell cp %s %s" % (file_on_device_tmp, file_on_device))
+ self.adb_wrapper.Shell("mkdir -p %s" % folder_on_device)
+ self.adb_wrapper.Shell("cp %s %s" % (file_on_device_tmp, file_on_device))
- def PreTests(self, node, path):
- suite_dir = os.path.abspath(os.path.dirname(path))
- if node.path:
- bench_rel = os.path.normpath(os.path.join(*node.path))
- bench_abs = os.path.join(suite_dir, bench_rel)
- else:
- bench_rel = "."
- bench_abs = suite_dir
-
- self._PushFile(self.shell_dir, node.binary, "bin")
+ def _PushExecutable(self, shell_dir, target_dir, binary):
+ self._PushFile(shell_dir, binary, target_dir)
# Push external startup data. Backwards compatible for revisions where
# these files didn't exist.
self._PushFile(
- self.shell_dir,
+ shell_dir,
"natives_blob.bin",
- "bin",
+ target_dir,
skip_if_missing=True,
)
self._PushFile(
- self.shell_dir,
+ shell_dir,
"snapshot_blob.bin",
- "bin",
+ target_dir,
skip_if_missing=True,
)
- if isinstance(node, Runnable):
+ def PreTests(self, node, path):
+ suite_dir = os.path.abspath(os.path.dirname(path))
+ if node.path:
+ bench_rel = os.path.normpath(os.path.join(*node.path))
+ bench_abs = os.path.join(suite_dir, bench_rel)
+ else:
+ bench_rel = "."
+ bench_abs = suite_dir
+
+ self._PushExecutable(self.shell_dir, "bin", node.binary)
+ if self.shell_dir_no_patch:
+ self._PushExecutable(
+ self.shell_dir_no_patch, "bin_no_patch", node.binary)
+
+ if isinstance(node, RunnableConfig):
self._PushFile(bench_abs, node.main, bench_rel)
for resource in node.resources:
self._PushFile(bench_abs, resource, bench_rel)
- def Run(self, runnable, count):
+ def _Run(self, runnable, count, no_patch=False):
+ suffix = ' - without patch' if no_patch else ''
+ target_dir = "bin_no_patch" if no_patch else "bin"
+ title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
cache = cache_control.CacheControl(self.device)
cache.DropRamCaches()
binary_on_device = os.path.join(
- AndroidPlatform.DEVICE_DIR, "bin", runnable.binary)
+ AndroidPlatform.DEVICE_DIR, target_dir, runnable.binary)
cmd = [binary_on_device] + runnable.GetCommandFlags(self.extra_flags)
# Relative path to benchmark directory.
@@ -626,7 +764,7 @@ class AndroidPlatform(Platform): # pragma: no cover
retries=0,
)
stdout = "\n".join(output)
- print ">>> Stdout (#%d):" % (count + 1)
+ print title % "Stdout"
print stdout
except device_errors.CommandTimeoutError:
print ">>> Test timed out after %ss." % runnable.timeout
@@ -656,8 +794,13 @@ def Main(args):
default="")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
+ parser.add_option("--json-test-results-no-patch",
+ help="Path to a file for storing json results from run "
+ "without patch.")
parser.add_option("--outdir", help="Base directory with compile output",
default="out")
+ parser.add_option("--outdir-no-patch",
+ help="Base directory with compile output without patch")
(options, args) = parser.parse_args(args)
if len(args) == 0: # pragma: no cover
@@ -671,21 +814,35 @@ def Main(args):
print "Unknown architecture %s" % options.arch
return 1
- if (options.device and not options.android_build_tools): # pragma: no cover
+ if options.device and not options.android_build_tools: # pragma: no cover
print "Specifying a device requires Android build tools."
return 1
+ if (options.json_test_results_no_patch and
+ not options.outdir_no_patch): # pragma: no cover
+ print("For writing json test results without patch, an outdir without "
+ "patch must be specified.")
+ return 1
+
workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if options.buildbot:
- options.shell_dir = os.path.join(workspace, options.outdir, "Release")
+ build_config = "Release"
+ else:
+ build_config = "%s.release" % options.arch
+
+ options.shell_dir = os.path.join(workspace, options.outdir, build_config)
+
+ if options.outdir_no_patch:
+ options.shell_dir_no_patch = os.path.join(
+ workspace, options.outdir_no_patch, build_config)
else:
- options.shell_dir = os.path.join(workspace, options.outdir,
- "%s.release" % options.arch)
+ options.shell_dir_no_patch = None
platform = Platform.GetPlatform(options)
results = Results()
+ results_no_patch = Results()
for path in args:
path = os.path.abspath(path)
@@ -703,7 +860,7 @@ def Main(args):
platform.PreExecution()
# Build the graph/trace tree structure.
- root = BuildGraphs(suite, options.arch)
+ root = BuildGraphConfigs(suite, options.arch)
# Callback to be called on each node on traversal.
def NodeCB(node):
@@ -721,8 +878,10 @@ def Main(args):
yield platform.Run(runnable, i)
# Let runnable iterate over all runs and handle output.
- results += runnable.Run(Runner)
-
+ result, result_no_patch = runnable.Run(
+ Runner, trybot=options.shell_dir_no_patch)
+ results += result
+ results_no_patch += result_no_patch
platform.PostExecution()
if options.json_test_results:
@@ -730,6 +889,11 @@ def Main(args):
else: # pragma: no cover
print results
+ if options.json_test_results_no_patch:
+ results_no_patch.WriteToFile(options.json_test_results_no_patch)
+ else: # pragma: no cover
+ print results_no_patch
+
return min(1, len(results.errors))
if __name__ == "__main__": # pragma: no cover
diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py
index dccd52a359..4c63fb6e63 100644
--- a/deps/v8/tools/testrunner/local/execution.py
+++ b/deps/v8/tools/testrunner/local/execution.py
@@ -104,6 +104,8 @@ class Runner(object):
"--stress-opt" in self.context.mode_flags or
"--stress-opt" in self.context.extra_flags):
timeout *= 4
+ if "--noenable-vfp3" in self.context.extra_flags:
+ timeout *= 2
# FIXME(machenbach): Make this more OO. Don't expose default outcomes or
# the like.
if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py
index 469d64bc00..60ec635262 100644
--- a/deps/v8/tools/testrunner/local/progress.py
+++ b/deps/v8/tools/testrunner/local/progress.py
@@ -313,10 +313,11 @@ class JUnitTestProgressIndicator(ProgressIndicator):
class JsonTestProgressIndicator(ProgressIndicator):
- def __init__(self, json_test_results, arch, mode):
+ def __init__(self, json_test_results, arch, mode, random_seed):
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
+ self.random_seed = random_seed
self.results = []
self.tests = []
@@ -370,6 +371,11 @@ class JsonTestProgressIndicator(ProgressIndicator):
"result": test.suite.GetOutcome(test),
"expected": list(test.outcomes or ["PASS"]),
"duration": test.duration,
+
+ # TODO(machenbach): This stores only the global random seed from the
+ # context and not possible overrides when using random-seed stress.
+ "random_seed": self.random_seed,
+ "variant": test.variant,
})
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 91a3e04525..c8e43521e7 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -35,15 +35,47 @@ from . import utils
from ..objects import testcase
# Use this to run several variants of the tests.
-VARIANT_FLAGS = {
- "default": [],
- "stress": ["--stress-opt", "--always-opt"],
- "turbofan": ["--turbo", "--always-opt"],
- "nocrankshaft": ["--nocrankshaft"]}
+ALL_VARIANT_FLAGS = {
+ "default": [[]],
+ "stress": [["--stress-opt", "--always-opt"]],
+ "turbofan": [["--turbo", "--always-opt"]],
+ "nocrankshaft": [["--nocrankshaft"]],
+}
+
+# FAST_VARIANTS implies no --always-opt.
+FAST_VARIANT_FLAGS = {
+ "default": [[]],
+ "stress": [["--stress-opt"]],
+ "turbofan": [["--turbo"]],
+ "nocrankshaft": [["--nocrankshaft"]],
+}
+
+ALL_VARIANTS = set(["default", "stress", "turbofan", "nocrankshaft"])
+FAST_VARIANTS = set(["default", "turbofan"])
+STANDARD_VARIANT = set(["default"])
+
+
+class VariantGenerator(object):
+ def __init__(self, suite, variants):
+ self.suite = suite
+ self.all_variants = ALL_VARIANTS & variants
+ self.fast_variants = FAST_VARIANTS & variants
+ self.standard_variant = STANDARD_VARIANT & variants
+
+ def FilterVariantsByTest(self, testcase):
+ if testcase.outcomes and statusfile.OnlyStandardVariant(
+ testcase.outcomes):
+ return self.standard_variant
+ if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
+ return self.fast_variants
+ return self.all_variants
+
+ def GetFlagSets(self, testcase, variant):
+ if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
+ return FAST_VARIANT_FLAGS[variant]
+ else:
+ return ALL_VARIANT_FLAGS[variant]
-FAST_VARIANT_FLAGS = [
- f for v, f in VARIANT_FLAGS.iteritems() if v in ["default", "turbofan"]
-]
class TestSuite(object):
@@ -89,15 +121,19 @@ class TestSuite(object):
def ListTests(self, context):
raise NotImplementedError
- def VariantFlags(self, testcase, default_flags):
- if testcase.outcomes and statusfile.OnlyStandardVariant(testcase.outcomes):
- return [[]]
- if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
- # FAST_VARIANTS implies no --always-opt.
- return [ filter(lambda flag: flag != "--always-opt", f)
- for f in filter(lambda flags: flags in FAST_VARIANT_FLAGS,
- default_flags) ]
- return default_flags
+ def _VariantGeneratorFactory(self):
+ """The variant generator class to be used."""
+ return VariantGenerator
+
+ def CreateVariantGenerator(self, variants):
+ """Return a generator for the testing variants of this suite.
+
+ Args:
+ variants: List of variant names to be run as specified by the test
+ runner.
+ Returns: An object of type VariantGenerator.
+ """
+ return self._VariantGeneratorFactory()(self, set(variants))
def DownloadData(self):
pass
@@ -175,10 +211,17 @@ class TestSuite(object):
print("Unused rule: %s -> %s" % (rule, self.wildcards[rule]))
def FilterTestCasesByArgs(self, args):
+ """Filter test cases based on command-line arguments.
+
+ An argument with an asterisk at the end will match all test cases
+ that have the argument as a prefix. Without asterisk, only exact matches
+ will be used with the exception of the test-suite name as argument.
+ """
filtered = []
- filtered_args = []
+ globs = []
+ exact_matches = []
for a in args:
- argpath = a.split(os.path.sep)
+ argpath = a.split('/')
if argpath[0] != self.name:
continue
if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
@@ -186,12 +229,18 @@ class TestSuite(object):
path = os.path.sep.join(argpath[1:])
if path[-1] == '*':
path = path[:-1]
- filtered_args.append(path)
+ globs.append(path)
+ else:
+ exact_matches.append(path)
for t in self.tests:
- for a in filtered_args:
+ for a in globs:
if t.path.startswith(a):
filtered.append(t)
break
+ for a in exact_matches:
+ if t.path == a:
+ filtered.append(t)
+ break
self.tests = filtered
def GetFlagsForTestCase(self, testcase, context):
@@ -239,6 +288,11 @@ class TestSuite(object):
return self.total_duration
+class StandardVariantGenerator(VariantGenerator):
+ def FilterVariantsByTest(self, testcase):
+ return self.standard_variant
+
+
class GoogleTestSuite(TestSuite):
def __init__(self, name, root):
super(GoogleTestSuite, self).__init__(name, root)
@@ -272,8 +326,8 @@ class GoogleTestSuite(TestSuite):
["--gtest_print_time=0"] +
context.mode_flags)
- def VariantFlags(self, testcase, default_flags):
- return [[]]
+ def _VariantGeneratorFactory(self):
+ return StandardVariantGenerator
def shell(self):
return self.name
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index c7b445eaab..0ab06361b1 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -29,10 +29,12 @@
from . import output
class TestCase(object):
- def __init__(self, suite, path, flags=None, dependency=None):
+ def __init__(self, suite, path, variant='default', flags=None,
+ dependency=None):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
+ self.variant = variant # name of the used testing variant
self.dependency = dependency # |path| for testcase that must be run first
self.outcomes = set([])
self.output = None
@@ -40,8 +42,9 @@ class TestCase(object):
self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
- def CopyAddingFlags(self, flags):
- copy = TestCase(self.suite, self.path, self.flags + flags, self.dependency)
+ def CopyAddingFlags(self, variant, flags):
+ copy = TestCase(self.suite, self.path, variant, self.flags + flags,
+ self.dependency)
copy.outcomes = self.outcomes
return copy
@@ -51,16 +54,16 @@ class TestCase(object):
and returns them as a JSON serializable object.
"""
assert self.id is not None
- return [self.suitename(), self.path, self.flags,
+ return [self.suitename(), self.path, self.variant, self.flags,
self.dependency, list(self.outcomes or []), self.id]
@staticmethod
def UnpackTask(task):
"""Creates a new TestCase object based on packed task data."""
# For the order of the fields, refer to PackTask() above.
- test = TestCase(str(task[0]), task[1], task[2], task[3])
- test.outcomes = set(task[4])
- test.id = task[5]
+ test = TestCase(str(task[0]), task[1], task[2], task[3], task[4])
+ test.outcomes = set(task[5])
+ test.id = task[6]
test.run = 1
return test
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index f9ea0c09c9..f3e5aff49f 100644
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -139,17 +139,17 @@ class PerfTest(unittest.TestCase):
all_args += args
return run_perf.Main(all_args)
- def _LoadResults(self):
- with open(self._test_output) as f:
+ def _LoadResults(self, file_name=None):
+ with open(file_name or self._test_output) as f:
return json.load(f)
- def _VerifyResults(self, suite, units, traces):
+ def _VerifyResults(self, suite, units, traces, file_name=None):
self.assertEquals([
{"units": units,
"graphs": [suite, trace["name"]],
"results": trace["results"],
"stddev": trace["stddev"]} for trace in traces],
- self._LoadResults()["traces"])
+ self._LoadResults(file_name)["traces"])
def _VerifyErrors(self, errors):
self.assertEquals(errors, self._LoadResults()["errors"])
@@ -402,17 +402,56 @@ class PerfTest(unittest.TestCase):
# require lots of complicated mocks for the android tools.
def testAndroid(self):
self._WriteTestInput(V8_JSON)
- platform = run_perf.Platform
+ # FIXME(machenbach): This is not test-local!
+ platform = run_perf.AndroidPlatform
platform.PreExecution = MagicMock(return_value=None)
platform.PostExecution = MagicMock(return_value=None)
platform.PreTests = MagicMock(return_value=None)
platform.Run = MagicMock(
- return_value="Richards: 1.234\nDeltaBlue: 10657567\n")
+ return_value=("Richards: 1.234\nDeltaBlue: 10657567\n", None))
run_perf.AndroidPlatform = MagicMock(return_value=platform)
self.assertEquals(
0, self._CallMain("--android-build-tools", "/some/dir",
- "--arch", "android_arm"))
+ "--arch", "arm"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
])
+
+ def testTwoRuns_Trybot(self):
+ test_input = dict(V8_JSON)
+ test_input["run_count"] = 2
+ self._WriteTestInput(test_input)
+ self._MockCommand([".", ".", ".", "."],
+ ["Richards: 100\nDeltaBlue: 200\n",
+ "Richards: 200\nDeltaBlue: 20\n",
+ "Richards: 50\nDeltaBlue: 200\n",
+ "Richards: 100\nDeltaBlue: 20\n"])
+ test_output_no_patch = path.join(TEST_WORKSPACE, "results_no_patch.json")
+ self.assertEquals(0, self._CallMain(
+ "--outdir-no-patch", "out-no-patch",
+ "--json-test-results-no-patch", test_output_no_patch,
+ ))
+ self._VerifyResults("test", "score", [
+ {"name": "Richards", "results": ["100.0", "200.0"], "stddev": ""},
+ {"name": "DeltaBlue", "results": ["20.0", "20.0"], "stddev": ""},
+ ])
+ self._VerifyResults("test", "score", [
+ {"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
+ {"name": "DeltaBlue", "results": ["200.0", "200.0"], "stddev": ""},
+ ], test_output_no_patch)
+ self._VerifyErrors([])
+ self._VerifyMockMultiple(
+ (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
+ (path.join("out-no-patch", "x64.release", "d7"), "--flag", "run.js"),
+ (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
+ (path.join("out-no-patch", "x64.release", "d7"), "--flag", "run.js"),
+ )
+
+ def testUnzip(self):
+ def Gen():
+ for i in [1, 2, 3]:
+ yield i, i + 1
+ l, r = run_perf.Unzip(Gen())
+ self.assertEquals([1, 2, 3], list(l()))
+ self.assertEquals([2, 3, 4], list(r()))
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index af8b6b6890..f3d5d15ab5 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -51,86 +51,87 @@ INSTANCE_TYPES = {
22: "SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
26: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
128: "SYMBOL_TYPE",
+ 134: "FLOAT32X4_TYPE",
129: "MAP_TYPE",
130: "CODE_TYPE",
131: "ODDBALL_TYPE",
- 181: "CELL_TYPE",
- 183: "PROPERTY_CELL_TYPE",
+ 182: "CELL_TYPE",
+ 184: "PROPERTY_CELL_TYPE",
132: "HEAP_NUMBER_TYPE",
133: "MUTABLE_HEAP_NUMBER_TYPE",
- 134: "FLOAT32X4_TYPE",
135: "FOREIGN_TYPE",
136: "BYTE_ARRAY_TYPE",
- 137: "FREE_SPACE_TYPE",
- 138: "EXTERNAL_INT8_ARRAY_TYPE",
- 139: "EXTERNAL_UINT8_ARRAY_TYPE",
- 140: "EXTERNAL_INT16_ARRAY_TYPE",
- 141: "EXTERNAL_UINT16_ARRAY_TYPE",
- 142: "EXTERNAL_INT32_ARRAY_TYPE",
- 143: "EXTERNAL_UINT32_ARRAY_TYPE",
- 144: "EXTERNAL_FLOAT32_ARRAY_TYPE",
- 145: "EXTERNAL_FLOAT64_ARRAY_TYPE",
- 146: "EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE",
- 147: "FIXED_INT8_ARRAY_TYPE",
- 148: "FIXED_UINT8_ARRAY_TYPE",
- 149: "FIXED_INT16_ARRAY_TYPE",
- 150: "FIXED_UINT16_ARRAY_TYPE",
- 151: "FIXED_INT32_ARRAY_TYPE",
- 152: "FIXED_UINT32_ARRAY_TYPE",
- 153: "FIXED_FLOAT32_ARRAY_TYPE",
- 154: "FIXED_FLOAT64_ARRAY_TYPE",
- 155: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
- 157: "FILLER_TYPE",
- 158: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
- 159: "DECLARED_ACCESSOR_INFO_TYPE",
- 160: "EXECUTABLE_ACCESSOR_INFO_TYPE",
- 161: "ACCESSOR_PAIR_TYPE",
- 162: "ACCESS_CHECK_INFO_TYPE",
- 163: "INTERCEPTOR_INFO_TYPE",
- 164: "CALL_HANDLER_INFO_TYPE",
- 165: "FUNCTION_TEMPLATE_INFO_TYPE",
- 166: "OBJECT_TEMPLATE_INFO_TYPE",
- 167: "SIGNATURE_INFO_TYPE",
- 168: "TYPE_SWITCH_INFO_TYPE",
- 170: "ALLOCATION_MEMENTO_TYPE",
- 169: "ALLOCATION_SITE_TYPE",
- 171: "SCRIPT_TYPE",
- 172: "CODE_CACHE_TYPE",
- 173: "POLYMORPHIC_CODE_CACHE_TYPE",
- 174: "TYPE_FEEDBACK_INFO_TYPE",
- 175: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 176: "BOX_TYPE",
- 184: "PROTOTYPE_INFO_TYPE",
- 179: "FIXED_ARRAY_TYPE",
- 156: "FIXED_DOUBLE_ARRAY_TYPE",
- 180: "SHARED_FUNCTION_INFO_TYPE",
- 182: "WEAK_CELL_TYPE",
- 188: "JS_MESSAGE_OBJECT_TYPE",
- 187: "JS_VALUE_TYPE",
- 189: "JS_DATE_TYPE",
- 190: "JS_OBJECT_TYPE",
- 191: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 192: "JS_GENERATOR_OBJECT_TYPE",
- 193: "JS_MODULE_TYPE",
- 194: "JS_GLOBAL_OBJECT_TYPE",
- 195: "JS_BUILTINS_OBJECT_TYPE",
- 196: "JS_GLOBAL_PROXY_TYPE",
- 197: "JS_ARRAY_TYPE",
- 198: "JS_ARRAY_BUFFER_TYPE",
- 199: "JS_TYPED_ARRAY_TYPE",
- 200: "JS_DATA_VIEW_TYPE",
- 186: "JS_PROXY_TYPE",
- 201: "JS_SET_TYPE",
- 202: "JS_MAP_TYPE",
- 203: "JS_SET_ITERATOR_TYPE",
- 204: "JS_MAP_ITERATOR_TYPE",
- 205: "JS_WEAK_MAP_TYPE",
- 206: "JS_WEAK_SET_TYPE",
- 207: "JS_REGEXP_TYPE",
- 208: "JS_FUNCTION_TYPE",
- 185: "JS_FUNCTION_PROXY_TYPE",
- 177: "DEBUG_INFO_TYPE",
- 178: "BREAK_POINT_INFO_TYPE",
+ 137: "BYTECODE_ARRAY_TYPE",
+ 138: "FREE_SPACE_TYPE",
+ 139: "EXTERNAL_INT8_ARRAY_TYPE",
+ 140: "EXTERNAL_UINT8_ARRAY_TYPE",
+ 141: "EXTERNAL_INT16_ARRAY_TYPE",
+ 142: "EXTERNAL_UINT16_ARRAY_TYPE",
+ 143: "EXTERNAL_INT32_ARRAY_TYPE",
+ 144: "EXTERNAL_UINT32_ARRAY_TYPE",
+ 145: "EXTERNAL_FLOAT32_ARRAY_TYPE",
+ 146: "EXTERNAL_FLOAT64_ARRAY_TYPE",
+ 147: "EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE",
+ 148: "FIXED_INT8_ARRAY_TYPE",
+ 149: "FIXED_UINT8_ARRAY_TYPE",
+ 150: "FIXED_INT16_ARRAY_TYPE",
+ 151: "FIXED_UINT16_ARRAY_TYPE",
+ 152: "FIXED_INT32_ARRAY_TYPE",
+ 153: "FIXED_UINT32_ARRAY_TYPE",
+ 154: "FIXED_FLOAT32_ARRAY_TYPE",
+ 155: "FIXED_FLOAT64_ARRAY_TYPE",
+ 156: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
+ 158: "FILLER_TYPE",
+ 159: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
+ 160: "DECLARED_ACCESSOR_INFO_TYPE",
+ 161: "EXECUTABLE_ACCESSOR_INFO_TYPE",
+ 162: "ACCESSOR_PAIR_TYPE",
+ 163: "ACCESS_CHECK_INFO_TYPE",
+ 164: "INTERCEPTOR_INFO_TYPE",
+ 165: "CALL_HANDLER_INFO_TYPE",
+ 166: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 167: "OBJECT_TEMPLATE_INFO_TYPE",
+ 168: "SIGNATURE_INFO_TYPE",
+ 169: "TYPE_SWITCH_INFO_TYPE",
+ 171: "ALLOCATION_MEMENTO_TYPE",
+ 170: "ALLOCATION_SITE_TYPE",
+ 172: "SCRIPT_TYPE",
+ 173: "CODE_CACHE_TYPE",
+ 174: "POLYMORPHIC_CODE_CACHE_TYPE",
+ 175: "TYPE_FEEDBACK_INFO_TYPE",
+ 176: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 177: "BOX_TYPE",
+ 185: "PROTOTYPE_INFO_TYPE",
+ 180: "FIXED_ARRAY_TYPE",
+ 157: "FIXED_DOUBLE_ARRAY_TYPE",
+ 181: "SHARED_FUNCTION_INFO_TYPE",
+ 183: "WEAK_CELL_TYPE",
+ 189: "JS_MESSAGE_OBJECT_TYPE",
+ 188: "JS_VALUE_TYPE",
+ 190: "JS_DATE_TYPE",
+ 191: "JS_OBJECT_TYPE",
+ 192: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 193: "JS_GENERATOR_OBJECT_TYPE",
+ 194: "JS_MODULE_TYPE",
+ 195: "JS_GLOBAL_OBJECT_TYPE",
+ 196: "JS_BUILTINS_OBJECT_TYPE",
+ 197: "JS_GLOBAL_PROXY_TYPE",
+ 198: "JS_ARRAY_TYPE",
+ 199: "JS_ARRAY_BUFFER_TYPE",
+ 200: "JS_TYPED_ARRAY_TYPE",
+ 201: "JS_DATA_VIEW_TYPE",
+ 187: "JS_PROXY_TYPE",
+ 202: "JS_SET_TYPE",
+ 203: "JS_MAP_TYPE",
+ 204: "JS_SET_ITERATOR_TYPE",
+ 205: "JS_MAP_ITERATOR_TYPE",
+ 206: "JS_WEAK_MAP_TYPE",
+ 207: "JS_WEAK_SET_TYPE",
+ 208: "JS_REGEXP_TYPE",
+ 209: "JS_FUNCTION_TYPE",
+ 186: "JS_FUNCTION_PROXY_TYPE",
+ 178: "DEBUG_INFO_TYPE",
+ 179: "BREAK_POINT_INFO_TYPE",
}
# List of known V8 maps.
@@ -138,35 +139,35 @@ KNOWN_MAPS = {
0x08081: (136, "ByteArrayMap"),
0x080ad: (129, "MetaMap"),
0x080d9: (131, "NullMap"),
- 0x08105: (179, "FixedArrayMap"),
+ 0x08105: (180, "FixedArrayMap"),
0x08131: (4, "OneByteInternalizedStringMap"),
- 0x0815d: (182, "WeakCellMap"),
- 0x08189: (131, "UndefinedMap"),
- 0x081b5: (132, "HeapNumberMap"),
- 0x081e1: (137, "FreeSpaceMap"),
- 0x0820d: (157, "OnePointerFillerMap"),
- 0x08239: (157, "TwoPointerFillerMap"),
- 0x08265: (131, "TheHoleMap"),
+ 0x0815d: (183, "WeakCellMap"),
+ 0x08189: (131, "TheHoleMap"),
+ 0x081b5: (138, "FreeSpaceMap"),
+ 0x081e1: (158, "OnePointerFillerMap"),
+ 0x0820d: (158, "TwoPointerFillerMap"),
+ 0x08239: (131, "UndefinedMap"),
+ 0x08265: (132, "HeapNumberMap"),
0x08291: (131, "BooleanMap"),
0x082bd: (131, "UninitializedMap"),
- 0x082e9: (181, "CellMap"),
- 0x08315: (183, "GlobalPropertyCellMap"),
- 0x08341: (180, "SharedFunctionInfoMap"),
+ 0x082e9: (182, "CellMap"),
+ 0x08315: (184, "GlobalPropertyCellMap"),
+ 0x08341: (181, "SharedFunctionInfoMap"),
0x0836d: (133, "MutableHeapNumberMap"),
0x08399: (134, "Float32x4Map"),
- 0x083c5: (179, "NativeContextMap"),
+ 0x083c5: (180, "NativeContextMap"),
0x083f1: (130, "CodeMap"),
- 0x0841d: (179, "ScopeInfoMap"),
- 0x08449: (179, "FixedCOWArrayMap"),
- 0x08475: (156, "FixedDoubleArrayMap"),
+ 0x0841d: (180, "ScopeInfoMap"),
+ 0x08449: (180, "FixedCOWArrayMap"),
+ 0x08475: (157, "FixedDoubleArrayMap"),
0x084a1: (68, "OneByteStringMap"),
- 0x084cd: (179, "FunctionContextMap"),
+ 0x084cd: (180, "FunctionContextMap"),
0x084f9: (131, "NoInterceptorResultSentinelMap"),
0x08525: (131, "ArgumentsMarkerMap"),
0x08551: (131, "ExceptionMap"),
0x0857d: (131, "TerminationExceptionMap"),
- 0x085a9: (179, "HashTableMap"),
- 0x085d5: (179, "OrderedHashTableMap"),
+ 0x085a9: (180, "HashTableMap"),
+ 0x085d5: (180, "OrderedHashTableMap"),
0x08601: (128, "SymbolMap"),
0x0862d: (64, "StringMap"),
0x08659: (69, "ConsOneByteStringMap"),
@@ -187,54 +188,55 @@ KNOWN_MAPS = {
0x088ed: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
0x08919: (22, "ShortExternalOneByteInternalizedStringMap"),
0x08945: (86, "ShortExternalOneByteStringMap"),
- 0x08971: (138, "ExternalInt8ArrayMap"),
- 0x0899d: (139, "ExternalUint8ArrayMap"),
- 0x089c9: (140, "ExternalInt16ArrayMap"),
- 0x089f5: (141, "ExternalUint16ArrayMap"),
- 0x08a21: (142, "ExternalInt32ArrayMap"),
- 0x08a4d: (143, "ExternalUint32ArrayMap"),
- 0x08a79: (144, "ExternalFloat32ArrayMap"),
- 0x08aa5: (145, "ExternalFloat64ArrayMap"),
- 0x08ad1: (146, "ExternalUint8ClampedArrayMap"),
- 0x08afd: (148, "FixedUint8ArrayMap"),
- 0x08b29: (147, "FixedInt8ArrayMap"),
- 0x08b55: (150, "FixedUint16ArrayMap"),
- 0x08b81: (149, "FixedInt16ArrayMap"),
- 0x08bad: (152, "FixedUint32ArrayMap"),
- 0x08bd9: (151, "FixedInt32ArrayMap"),
- 0x08c05: (153, "FixedFloat32ArrayMap"),
- 0x08c31: (154, "FixedFloat64ArrayMap"),
- 0x08c5d: (155, "FixedUint8ClampedArrayMap"),
- 0x08c89: (179, "SloppyArgumentsElementsMap"),
- 0x08cb5: (179, "CatchContextMap"),
- 0x08ce1: (179, "WithContextMap"),
- 0x08d0d: (179, "BlockContextMap"),
- 0x08d39: (179, "ModuleContextMap"),
- 0x08d65: (179, "ScriptContextMap"),
- 0x08d91: (179, "ScriptContextTableMap"),
- 0x08dbd: (188, "JSMessageObjectMap"),
+ 0x08971: (139, "ExternalInt8ArrayMap"),
+ 0x0899d: (140, "ExternalUint8ArrayMap"),
+ 0x089c9: (141, "ExternalInt16ArrayMap"),
+ 0x089f5: (142, "ExternalUint16ArrayMap"),
+ 0x08a21: (143, "ExternalInt32ArrayMap"),
+ 0x08a4d: (144, "ExternalUint32ArrayMap"),
+ 0x08a79: (145, "ExternalFloat32ArrayMap"),
+ 0x08aa5: (146, "ExternalFloat64ArrayMap"),
+ 0x08ad1: (147, "ExternalUint8ClampedArrayMap"),
+ 0x08afd: (149, "FixedUint8ArrayMap"),
+ 0x08b29: (148, "FixedInt8ArrayMap"),
+ 0x08b55: (151, "FixedUint16ArrayMap"),
+ 0x08b81: (150, "FixedInt16ArrayMap"),
+ 0x08bad: (153, "FixedUint32ArrayMap"),
+ 0x08bd9: (152, "FixedInt32ArrayMap"),
+ 0x08c05: (154, "FixedFloat32ArrayMap"),
+ 0x08c31: (155, "FixedFloat64ArrayMap"),
+ 0x08c5d: (156, "FixedUint8ClampedArrayMap"),
+ 0x08c89: (180, "SloppyArgumentsElementsMap"),
+ 0x08cb5: (180, "CatchContextMap"),
+ 0x08ce1: (180, "WithContextMap"),
+ 0x08d0d: (180, "BlockContextMap"),
+ 0x08d39: (180, "ModuleContextMap"),
+ 0x08d65: (180, "ScriptContextMap"),
+ 0x08d91: (180, "ScriptContextTableMap"),
+ 0x08dbd: (189, "JSMessageObjectMap"),
0x08de9: (135, "ForeignMap"),
- 0x08e15: (190, "NeanderMap"),
- 0x08e41: (190, "ExternalMap"),
- 0x08e6d: (170, "AllocationMementoMap"),
- 0x08e99: (169, "AllocationSiteMap"),
- 0x08ec5: (173, "PolymorphicCodeCacheMap"),
- 0x08ef1: (171, "ScriptMap"),
- 0x0907d: (176, "BoxMap"),
- 0x090a9: (160, "ExecutableAccessorInfoMap"),
- 0x090d5: (161, "AccessorPairMap"),
- 0x09101: (162, "AccessCheckInfoMap"),
- 0x0912d: (163, "InterceptorInfoMap"),
- 0x09159: (164, "CallHandlerInfoMap"),
- 0x09185: (165, "FunctionTemplateInfoMap"),
- 0x091b1: (166, "ObjectTemplateInfoMap"),
- 0x091dd: (168, "TypeSwitchInfoMap"),
- 0x09209: (172, "CodeCacheMap"),
- 0x09235: (174, "TypeFeedbackInfoMap"),
- 0x09261: (175, "AliasedArgumentsEntryMap"),
- 0x0928d: (177, "DebugInfoMap"),
- 0x092b9: (178, "BreakPointInfoMap"),
- 0x092e5: (184, "PrototypeInfoMap"),
+ 0x08e15: (191, "NeanderMap"),
+ 0x08e41: (191, "ExternalMap"),
+ 0x08e6d: (171, "AllocationMementoMap"),
+ 0x08e99: (170, "AllocationSiteMap"),
+ 0x08ec5: (174, "PolymorphicCodeCacheMap"),
+ 0x08ef1: (172, "ScriptMap"),
+ 0x09101: (161, "ExecutableAccessorInfoMap"),
+ 0x09159: (162, "AccessorPairMap"),
+ 0x09209: (185, "PrototypeInfoMap"),
+ 0x09839: (137, "BytecodeArrayMap"),
+ 0x09865: (177, "BoxMap"),
+ 0x09891: (163, "AccessCheckInfoMap"),
+ 0x098bd: (164, "InterceptorInfoMap"),
+ 0x098e9: (165, "CallHandlerInfoMap"),
+ 0x09915: (166, "FunctionTemplateInfoMap"),
+ 0x09941: (167, "ObjectTemplateInfoMap"),
+ 0x0996d: (169, "TypeSwitchInfoMap"),
+ 0x09999: (173, "CodeCacheMap"),
+ 0x099c5: (175, "TypeFeedbackInfoMap"),
+ 0x099f1: (176, "AliasedArgumentsEntryMap"),
+ 0x09a1d: (178, "DebugInfoMap"),
+ 0x09a49: (179, "BreakPointInfoMap"),
}
# List of known V8 objects.
@@ -242,60 +244,66 @@ KNOWN_OBJECTS = {
("OLD_SPACE", 0x08081): "NullValue",
("OLD_SPACE", 0x08091): "EmptyDescriptorArray",
("OLD_SPACE", 0x08099): "EmptyFixedArray",
- ("OLD_SPACE", 0x080bd): "UndefinedValue",
- ("OLD_SPACE", 0x080e5): "NanValue",
- ("OLD_SPACE", 0x080f1): "TheHoleValue",
- ("OLD_SPACE", 0x08111): "TrueValue",
- ("OLD_SPACE", 0x08131): "FalseValue",
- ("OLD_SPACE", 0x08155): "empty_string",
- ("OLD_SPACE", 0x08161): "UninitializedValue",
- ("OLD_SPACE", 0x0818d): "EmptyByteArray",
- ("OLD_SPACE", 0x08195): "NoInterceptorResultSentinel",
- ("OLD_SPACE", 0x081d1): "ArgumentsMarker",
- ("OLD_SPACE", 0x081fd): "Exception",
- ("OLD_SPACE", 0x08225): "TerminationException",
- ("OLD_SPACE", 0x08259): "NumberStringCache",
- ("OLD_SPACE", 0x08a61): "SingleCharacterStringCache",
- ("OLD_SPACE", 0x08ef9): "StringSplitCache",
- ("OLD_SPACE", 0x09301): "RegExpMultipleCache",
- ("OLD_SPACE", 0x09709): "EmptyExternalInt8Array",
- ("OLD_SPACE", 0x09715): "EmptyExternalUint8Array",
- ("OLD_SPACE", 0x09721): "EmptyExternalInt16Array",
- ("OLD_SPACE", 0x0972d): "EmptyExternalUint16Array",
- ("OLD_SPACE", 0x09739): "EmptyExternalInt32Array",
- ("OLD_SPACE", 0x09745): "EmptyExternalUint32Array",
- ("OLD_SPACE", 0x09751): "EmptyExternalFloat32Array",
- ("OLD_SPACE", 0x0975d): "EmptyExternalFloat64Array",
- ("OLD_SPACE", 0x09769): "EmptyExternalUint8ClampedArray",
- ("OLD_SPACE", 0x09775): "EmptyFixedUint8Array",
- ("OLD_SPACE", 0x0977d): "EmptyFixedInt8Array",
- ("OLD_SPACE", 0x09785): "EmptyFixedUint16Array",
- ("OLD_SPACE", 0x0978d): "EmptyFixedInt16Array",
- ("OLD_SPACE", 0x09795): "EmptyFixedUint32Array",
- ("OLD_SPACE", 0x0979d): "EmptyFixedInt32Array",
- ("OLD_SPACE", 0x097a5): "EmptyFixedFloat32Array",
- ("OLD_SPACE", 0x097ad): "EmptyFixedFloat64Array",
- ("OLD_SPACE", 0x097b5): "EmptyFixedUint8ClampedArray",
- ("OLD_SPACE", 0x097bd): "InfinityValue",
- ("OLD_SPACE", 0x097c9): "MinusZeroValue",
- ("OLD_SPACE", 0x097d5): "MessageListeners",
- ("OLD_SPACE", 0x097f1): "CodeStubs",
- ("OLD_SPACE", 0x12439): "ArrayProtector",
- ("OLD_SPACE", 0x12dfd): "KeyedLoadDummyVector",
- ("OLD_SPACE", 0x13cc5): "NonMonomorphicCache",
- ("OLD_SPACE", 0x14009): "PolymorphicCodeCache",
- ("OLD_SPACE", 0x14011): "NativesSourceCache",
- ("OLD_SPACE", 0x142d1): "ExperimentalNativesSourceCache",
- ("OLD_SPACE", 0x14309): "ExtraNativesSourceCache",
- ("OLD_SPACE", 0x14315): "EmptyScript",
- ("OLD_SPACE", 0x14351): "IntrinsicFunctionNames",
- ("OLD_SPACE", 0x233d5): "UndefinedCell",
- ("OLD_SPACE", 0x233dd): "ObservationState",
- ("OLD_SPACE", 0x233e9): "SymbolRegistry",
- ("OLD_SPACE", 0x2429d): "EmptySlowElementDictionary",
- ("OLD_SPACE", 0x242c5): "AllocationSitesScratchpad",
- ("OLD_SPACE", 0x246cd): "WeakObjectToCodeTable",
- ("OLD_SPACE", 0x4a5c1): "StringTable",
- ("CODE_SPACE", 0x180c1): "JsEntryCode",
- ("CODE_SPACE", 0x25c41): "JsConstructEntryCode",
+ ("OLD_SPACE", 0x080bd): "TheHoleValue",
+ ("OLD_SPACE", 0x080dd): "UndefinedValue",
+ ("OLD_SPACE", 0x08105): "NanValue",
+ ("OLD_SPACE", 0x08115): "TrueValue",
+ ("OLD_SPACE", 0x08135): "FalseValue",
+ ("OLD_SPACE", 0x08159): "empty_string",
+ ("OLD_SPACE", 0x08165): "UninitializedValue",
+ ("OLD_SPACE", 0x08191): "EmptyByteArray",
+ ("OLD_SPACE", 0x08199): "NoInterceptorResultSentinel",
+ ("OLD_SPACE", 0x081d5): "ArgumentsMarker",
+ ("OLD_SPACE", 0x08201): "Exception",
+ ("OLD_SPACE", 0x08229): "TerminationException",
+ ("OLD_SPACE", 0x0825d): "NumberStringCache",
+ ("OLD_SPACE", 0x08a65): "SingleCharacterStringCache",
+ ("OLD_SPACE", 0x08efd): "StringSplitCache",
+ ("OLD_SPACE", 0x09305): "RegExpMultipleCache",
+ ("OLD_SPACE", 0x0970d): "EmptyExternalInt8Array",
+ ("OLD_SPACE", 0x09719): "EmptyExternalUint8Array",
+ ("OLD_SPACE", 0x09725): "EmptyExternalInt16Array",
+ ("OLD_SPACE", 0x09731): "EmptyExternalUint16Array",
+ ("OLD_SPACE", 0x0973d): "EmptyExternalInt32Array",
+ ("OLD_SPACE", 0x09749): "EmptyExternalUint32Array",
+ ("OLD_SPACE", 0x09755): "EmptyExternalFloat32Array",
+ ("OLD_SPACE", 0x09761): "EmptyExternalFloat64Array",
+ ("OLD_SPACE", 0x0976d): "EmptyExternalUint8ClampedArray",
+ ("OLD_SPACE", 0x09779): "EmptyFixedUint8Array",
+ ("OLD_SPACE", 0x09789): "EmptyFixedInt8Array",
+ ("OLD_SPACE", 0x09799): "EmptyFixedUint16Array",
+ ("OLD_SPACE", 0x097a9): "EmptyFixedInt16Array",
+ ("OLD_SPACE", 0x097b9): "EmptyFixedUint32Array",
+ ("OLD_SPACE", 0x097c9): "EmptyFixedInt32Array",
+ ("OLD_SPACE", 0x097d9): "EmptyFixedFloat32Array",
+ ("OLD_SPACE", 0x097e9): "EmptyFixedFloat64Array",
+ ("OLD_SPACE", 0x097f9): "EmptyFixedUint8ClampedArray",
+ ("OLD_SPACE", 0x0980d): "InfinityValue",
+ ("OLD_SPACE", 0x0981d): "MinusZeroValue",
+ ("OLD_SPACE", 0x0982d): "MinusInfinityValue",
+ ("OLD_SPACE", 0x09839): "MessageListeners",
+ ("OLD_SPACE", 0x09855): "CodeStubs",
+ ("OLD_SPACE", 0x0e52d): "ArrayProtector",
+ ("OLD_SPACE", 0x0e9a1): "KeyedLoadDummyVector",
+ ("OLD_SPACE", 0x13ded): "NonMonomorphicCache",
+ ("OLD_SPACE", 0x14131): "PolymorphicCodeCache",
+ ("OLD_SPACE", 0x14139): "NativesSourceCache",
+ ("OLD_SPACE", 0x14429): "ExperimentalNativesSourceCache",
+ ("OLD_SPACE", 0x14461): "ExtraNativesSourceCache",
+ ("OLD_SPACE", 0x1446d): "CodeStubNativesSourceCache",
+ ("OLD_SPACE", 0x1448d): "EmptyScript",
+ ("OLD_SPACE", 0x144cd): "IntrinsicFunctionNames",
+ ("OLD_SPACE", 0x240e1): "UndefinedCell",
+ ("OLD_SPACE", 0x240e9): "ObservationState",
+ ("OLD_SPACE", 0x240f5): "SymbolRegistry",
+ ("OLD_SPACE", 0x24f9d): "EmptySlowElementDictionary",
+ ("OLD_SPACE", 0x24fc5): "AllocationSitesScratchpad",
+ ("OLD_SPACE", 0x253cd): "WeakObjectToCodeTable",
+ ("OLD_SPACE", 0x25461): "EmptyPropertyCell",
+ ("OLD_SPACE", 0x25471): "CodeStubContext",
+ ("OLD_SPACE", 0x2ba11): "CodeStubExportsObject",
+ ("OLD_SPACE", 0x2be89): "EmptyBytecodeArray",
+ ("OLD_SPACE", 0x594dd): "StringTable",
+ ("CODE_SPACE", 0x16341): "JsEntryCode",
+ ("CODE_SPACE", 0x26a61): "JsConstructEntryCode",
}
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index d8a81c9c13..d1395f5d91 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -5,4 +5,4 @@ Try to write something funny. And please don't add trailing whitespace.
A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them when a crazy v8-autoroll account showed up.......
+The Smi looked at them when a crazy v8-autoroll account showed up..